LCOV - code coverage report
Current view: top level - src/backend/access/heap - heapam.c (source / functions) Coverage Total Hit
Test: PostgreSQL 19devel Lines: 91.8 % 2747 2521
Test Date: 2026-03-23 12:16:06 Functions: 100.0 % 82 82
Legend: Lines:     hit not hit

            Line data    Source code
       1              : /*-------------------------------------------------------------------------
       2              :  *
       3              :  * heapam.c
       4              :  *    heap access method code
       5              :  *
       6              :  * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
       7              :  * Portions Copyright (c) 1994, Regents of the University of California
       8              :  *
       9              :  *
      10              :  * IDENTIFICATION
      11              :  *    src/backend/access/heap/heapam.c
      12              :  *
      13              :  *
      14              :  * INTERFACE ROUTINES
      15              :  *      heap_beginscan  - begin relation scan
      16              :  *      heap_rescan     - restart a relation scan
      17              :  *      heap_endscan    - end relation scan
      18              :  *      heap_getnext    - retrieve next tuple in scan
      19              :  *      heap_fetch      - retrieve tuple with given tid
      20              :  *      heap_insert     - insert tuple into a relation
      21              :  *      heap_multi_insert - insert multiple tuples into a relation
      22              :  *      heap_delete     - delete a tuple from a relation
      23              :  *      heap_update     - replace a tuple in a relation with another tuple
      24              :  *
      25              :  * NOTES
      26              :  *    This file contains the heap_ routines which implement
      27              :  *    the POSTGRES heap access method used for all POSTGRES
      28              :  *    relations.
      29              :  *
      30              :  *-------------------------------------------------------------------------
      31              :  */
      32              : #include "postgres.h"
      33              : 
      34              : #include "access/heapam.h"
      35              : #include "access/heaptoast.h"
      36              : #include "access/hio.h"
      37              : #include "access/multixact.h"
      38              : #include "access/subtrans.h"
      39              : #include "access/syncscan.h"
      40              : #include "access/valid.h"
      41              : #include "access/visibilitymap.h"
      42              : #include "access/xloginsert.h"
      43              : #include "catalog/pg_database.h"
      44              : #include "catalog/pg_database_d.h"
      45              : #include "commands/vacuum.h"
      46              : #include "pgstat.h"
      47              : #include "port/pg_bitutils.h"
      48              : #include "storage/lmgr.h"
      49              : #include "storage/predicate.h"
      50              : #include "storage/proc.h"
      51              : #include "storage/procarray.h"
      52              : #include "utils/datum.h"
      53              : #include "utils/injection_point.h"
      54              : #include "utils/inval.h"
      55              : #include "utils/spccache.h"
      56              : #include "utils/syscache.h"
      57              : 
      58              : 
/* insert support */
static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
                                     TransactionId xid, CommandId cid, int options);
/* WAL logging for heap_update */
static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
                                  Buffer newbuf, HeapTuple oldtup,
                                  HeapTuple newtup, HeapTuple old_key_tuple,
                                  bool all_visible_cleared, bool new_all_visible_cleared);
#ifdef USE_ASSERT_CHECKING
/* assert-only sanity checks for in-place-updateable catalog relations */
static void check_lock_if_inplace_updateable_rel(Relation relation,
                                                 const ItemPointerData *otid,
                                                 HeapTuple newtup);
static void check_inplace_rel_lock(HeapTuple oldtup);
#endif
/* update support: which columns changed between old and new tuple versions */
static Bitmapset *HeapDetermineColumnsInfo(Relation relation,
                                           Bitmapset *interesting_cols,
                                           Bitmapset *external_cols,
                                           HeapTuple oldtup, HeapTuple newtup,
                                           bool *has_external);
/* heavyweight tuple-lock acquisition (see LockTupleTuplock and friends) */
static bool heap_acquire_tuplock(Relation relation, const ItemPointerData *tid,
                                 LockTupleMode mode, LockWaitPolicy wait_policy,
                                 bool *have_tuple_lock);
/* sequential-scan block selection */
static inline BlockNumber heapgettup_advance_block(HeapScanDesc scan,
                                                   BlockNumber block,
                                                   ScanDirection dir);
static pg_noinline BlockNumber heapgettup_initial_block(HeapScanDesc scan,
                                                        ScanDirection dir);
/* xmax / MultiXact management for tuple locking and updates */
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
                                      uint16 old_infomask2, TransactionId add_to_xmax,
                                      LockTupleMode mode, bool is_update,
                                      TransactionId *result_xmax, uint16 *result_infomask,
                                      uint16 *result_infomask2);
static TM_Result heap_lock_updated_tuple(Relation rel,
                                         uint16 prior_infomask,
                                         TransactionId prior_raw_xmax,
                                         const ItemPointerData *prior_ctid,
                                         TransactionId xid,
                                         LockTupleMode mode);
static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
                                   uint16 *new_infomask2);
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
                                             uint16 t_infomask);
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
                                    LockTupleMode lockmode, bool *current_is_member);
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
                            Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
                            int *remaining);
static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
                                       uint16 infomask, Relation rel, int *remaining,
                                       bool logLockFailure);
/* bottom-up index deletion support */
static void index_delete_sort(TM_IndexDeleteOp *delstate);
static int  bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate);
/* logical decoding / replica identity support */
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
                                        bool *copy);
     113              : 
/*
 * This table lists the heavyweight lock mode that corresponds to each tuple
 * lock mode, as well as one or two corresponding MultiXactStatus values:
 * .lockstatus to merely lock tuples, and .updstatus to update them.  The
 * latter is set to -1 if the corresponding tuple lock mode does not allow
 * updating tuples -- see get_mxact_status_for_lock().
 *
 * These interact with InplaceUpdateTupleLock, an alias for ExclusiveLock.
 *
 * Don't look at lockstatus/updstatus directly!  Use get_mxact_status_for_lock
 * instead.
 */
static const struct
{
    LOCKMODE    hwlock;         /* heavyweight lock taken on the tuple */
    int         lockstatus;     /* MultiXactStatus for merely locking */
    int         updstatus;      /* MultiXactStatus for updating, or -1 */
}           tupleLockExtraInfo[] =

{
    [LockTupleKeyShare] = {
        .hwlock = AccessShareLock,
        .lockstatus = MultiXactStatusForKeyShare,
        /* KeyShare does not allow updating tuples */
        .updstatus = -1
    },
    [LockTupleShare] = {
        .hwlock = RowShareLock,
        .lockstatus = MultiXactStatusForShare,
        /* Share does not allow updating tuples */
        .updstatus = -1
    },
    [LockTupleNoKeyExclusive] = {
        .hwlock = ExclusiveLock,
        .lockstatus = MultiXactStatusForNoKeyUpdate,
        /* NoKeyExclusive allows updates that don't modify key columns */
        .updstatus = MultiXactStatusNoKeyUpdate
    },
    [LockTupleExclusive] = {
        .hwlock = AccessExclusiveLock,
        .lockstatus = MultiXactStatusForUpdate,
        /* Exclusive allows any update */
        .updstatus = MultiXactStatusUpdate
    }
};
     157              : 
/* Get the LOCKMODE for a given MultiXactStatus */
#define LOCKMODE_from_mxstatus(status) \
            (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)

/*
 * Acquire heavyweight locks on tuples, using a LockTupleMode strength value.
 * This is more readable than having every caller translate it to lock.h's
 * LOCKMODE.
 */
#define LockTupleTuplock(rel, tup, mode) \
    LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
#define UnlockTupleTuplock(rel, tup, mode) \
    UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
/* non-blocking variant; "log" controls logging of lock-acquisition failure */
#define ConditionalLockTupleTuplock(rel, tup, mode, log) \
    ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock, (log))
     173              : 
#ifdef USE_PREFETCH
/*
 * heap_index_delete_tuples and index_delete_prefetch_buffer use this
 * structure to coordinate prefetching activity
 */
typedef struct
{
    BlockNumber cur_hblkno;     /* heap block number most recently prefetched */
    int         next_item;      /* next deltids[] entry to consider */
    int         ndeltids;       /* number of entries in deltids[] */
    TM_IndexDelete *deltids;    /* deletion candidates, in sorted order */
} IndexDeletePrefetchState;
#endif

/*
 * heap_index_delete_tuples bottom-up index deletion costing constants.
 * NOTE(review): exact costing semantics are applied elsewhere (see
 * bottomup_sort_and_shrink) -- these only bound how many heap blocks a
 * bottom-up deletion pass is willing to visit.
 */
#define BOTTOMUP_MAX_NBLOCKS            6
#define BOTTOMUP_TOLERANCE_NBLOCKS      3

/*
 * heap_index_delete_tuples uses this when determining which heap blocks it
 * must visit to help its bottom-up index deletion caller
 */
typedef struct IndexDeleteCounts
{
    int16       npromisingtids; /* Number of "promising" TIDs in group */
    int16       ntids;          /* Number of TIDs in group */
    int16       ifirsttid;      /* Offset to group's first deltid */
} IndexDeleteCounts;
     202              : 
/*
 * This table maps tuple lock strength values for each particular
 * MultiXactStatus value.  Entry order must match the MultiXactStatus
 * enumeration, as the table is indexed by status value.
 */
static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
{
    LockTupleKeyShare,          /* ForKeyShare */
    LockTupleShare,             /* ForShare */
    LockTupleNoKeyExclusive,    /* ForNoKeyUpdate */
    LockTupleExclusive,         /* ForUpdate */
    LockTupleNoKeyExclusive,    /* NoKeyUpdate */
    LockTupleExclusive          /* Update */
};

/* Get the LockTupleMode for a given MultiXactStatus */
#define TUPLOCK_from_mxstatus(status) \
            (MultiXactStatusLock[(status)])
     220              : 
     221              : /*
     222              :  * Check that we have a valid snapshot if we might need TOAST access.
     223              :  */
     224              : static inline void
     225     12746958 : AssertHasSnapshotForToast(Relation rel)
     226              : {
     227              : #ifdef USE_ASSERT_CHECKING
     228              : 
     229              :     /* bootstrap mode in particular breaks this rule */
     230              :     if (!IsNormalProcessingMode())
     231              :         return;
     232              : 
     233              :     /* if the relation doesn't have a TOAST table, we are good */
     234              :     if (!OidIsValid(rel->rd_rel->reltoastrelid))
     235              :         return;
     236              : 
     237              :     Assert(HaveRegisteredOrActiveSnapshot());
     238              : 
     239              : #endif                          /* USE_ASSERT_CHECKING */
     240     12746958 : }
     241              : 
     242              : /* ----------------------------------------------------------------
     243              :  *                       heap support routines
     244              :  * ----------------------------------------------------------------
     245              :  */
     246              : 
     247              : /*
     248              :  * Streaming read API callback for parallel sequential scans. Returns the next
     249              :  * block the caller wants from the read stream or InvalidBlockNumber when done.
     250              :  */
     251              : static BlockNumber
     252       132439 : heap_scan_stream_read_next_parallel(ReadStream *stream,
     253              :                                     void *callback_private_data,
     254              :                                     void *per_buffer_data)
     255              : {
     256       132439 :     HeapScanDesc scan = (HeapScanDesc) callback_private_data;
     257              : 
     258              :     Assert(ScanDirectionIsForward(scan->rs_dir));
     259              :     Assert(scan->rs_base.rs_parallel);
     260              : 
     261       132439 :     if (unlikely(!scan->rs_inited))
     262              :     {
     263              :         /* parallel scan */
     264         2237 :         table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
     265         2237 :                                                  scan->rs_parallelworkerdata,
     266         2237 :                                                  (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel,
     267              :                                                  scan->rs_startblock,
     268              :                                                  scan->rs_numblocks);
     269              : 
     270              :         /* may return InvalidBlockNumber if there are no more blocks */
     271         4474 :         scan->rs_prefetch_block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
     272         2237 :                                                                     scan->rs_parallelworkerdata,
     273         2237 :                                                                     (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel);
     274         2237 :         scan->rs_inited = true;
     275              :     }
     276              :     else
     277              :     {
     278       130202 :         scan->rs_prefetch_block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
     279       130202 :                                                                     scan->rs_parallelworkerdata, (ParallelBlockTableScanDesc)
     280       130202 :                                                                     scan->rs_base.rs_parallel);
     281              :     }
     282              : 
     283       132439 :     return scan->rs_prefetch_block;
     284              : }
     285              : 
     286              : /*
     287              :  * Streaming read API callback for serial sequential and TID range scans.
     288              :  * Returns the next block the caller wants from the read stream or
     289              :  * InvalidBlockNumber when done.
     290              :  */
     291              : static BlockNumber
     292      5497957 : heap_scan_stream_read_next_serial(ReadStream *stream,
     293              :                                   void *callback_private_data,
     294              :                                   void *per_buffer_data)
     295              : {
     296      5497957 :     HeapScanDesc scan = (HeapScanDesc) callback_private_data;
     297              : 
     298      5497957 :     if (unlikely(!scan->rs_inited))
     299              :     {
     300      1601696 :         scan->rs_prefetch_block = heapgettup_initial_block(scan, scan->rs_dir);
     301      1601696 :         scan->rs_inited = true;
     302              :     }
     303              :     else
     304      3896261 :         scan->rs_prefetch_block = heapgettup_advance_block(scan,
     305              :                                                            scan->rs_prefetch_block,
     306              :                                                            scan->rs_dir);
     307              : 
     308      5497957 :     return scan->rs_prefetch_block;
     309              : }
     310              : 
     311              : /*
     312              :  * Read stream API callback for bitmap heap scans.
     313              :  * Returns the next block the caller wants from the read stream or
     314              :  * InvalidBlockNumber when done.
     315              :  */
     316              : static BlockNumber
     317       263978 : bitmapheap_stream_read_next(ReadStream *pgsr, void *private_data,
     318              :                             void *per_buffer_data)
     319              : {
     320       263978 :     TBMIterateResult *tbmres = per_buffer_data;
     321       263978 :     BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) private_data;
     322       263978 :     HeapScanDesc hscan = (HeapScanDesc) bscan;
     323       263978 :     TableScanDesc sscan = &hscan->rs_base;
     324              : 
     325              :     for (;;)
     326              :     {
     327       263978 :         CHECK_FOR_INTERRUPTS();
     328              : 
     329              :         /* no more entries in the bitmap */
     330       263978 :         if (!tbm_iterate(&sscan->st.rs_tbmiterator, tbmres))
     331        15631 :             return InvalidBlockNumber;
     332              : 
     333              :         /*
     334              :          * Ignore any claimed entries past what we think is the end of the
     335              :          * relation. It may have been extended after the start of our scan (we
     336              :          * only hold an AccessShareLock, and it could be inserts from this
     337              :          * backend).  We don't take this optimization in SERIALIZABLE
     338              :          * isolation though, as we need to examine all invisible tuples
     339              :          * reachable by the index.
     340              :          */
     341       248347 :         if (!IsolationIsSerializable() &&
     342       248238 :             tbmres->blockno >= hscan->rs_nblocks)
     343            0 :             continue;
     344              : 
     345       248347 :         return tbmres->blockno;
     346              :     }
     347              : 
     348              :     /* not reachable */
     349              :     Assert(false);
     350              : }
     351              : 
/* ----------------
 *      initscan - scan code common to heap_beginscan and heap_rescan
 *
 * "key" is copied into the scan descriptor if non-NULL.  "keep_startblock"
 * is true on rescan, to preserve the previous starting block so that
 * rewinding a cursor doesn't give surprising results.
 * ----------------
 */
static void
initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
{
    ParallelBlockTableScanDesc bpscan = NULL;
    bool        allow_strat;
    bool        allow_sync;

    /*
     * Determine the number of blocks we have to scan.
     *
     * It is sufficient to do this once at scan start, since any tuples added
     * while the scan is in progress will be invisible to my snapshot anyway.
     * (That is not true when using a non-MVCC snapshot.  However, we couldn't
     * guarantee to return tuples added after scan start anyway, since they
     * might go into pages we already scanned.  To guarantee consistent
     * results for a non-MVCC snapshot, the caller must hold some higher-level
     * lock that ensures the interesting tuple(s) won't change.)
     */
    if (scan->rs_base.rs_parallel != NULL)
    {
        /* parallel scan: leader already measured the relation */
        bpscan = (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
        scan->rs_nblocks = bpscan->phs_nblocks;
    }
    else
        scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_base.rs_rd);

    /*
     * If the table is large relative to NBuffers, use a bulk-read access
     * strategy and enable synchronized scanning (see syncscan.c).  Although
     * the thresholds for these features could be different, we make them the
     * same so that there are only two behaviors to tune rather than four.
     * (However, some callers need to be able to disable one or both of these
     * behaviors, independently of the size of the table; also there is a GUC
     * variable that can disable synchronized scanning.)
     *
     * Note that table_block_parallelscan_initialize has a very similar test;
     * if you change this, consider changing that one, too.
     */
    if (!RelationUsesLocalBuffers(scan->rs_base.rs_rd) &&
        scan->rs_nblocks > NBuffers / 4)
    {
        allow_strat = (scan->rs_base.rs_flags & SO_ALLOW_STRAT) != 0;
        allow_sync = (scan->rs_base.rs_flags & SO_ALLOW_SYNC) != 0;
    }
    else
        allow_strat = allow_sync = false;

    if (allow_strat)
    {
        /* During a rescan, keep the previous strategy object. */
        if (scan->rs_strategy == NULL)
            scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
    }
    else
    {
        if (scan->rs_strategy != NULL)
            FreeAccessStrategy(scan->rs_strategy);
        scan->rs_strategy = NULL;
    }

    if (scan->rs_base.rs_parallel != NULL)
    {
        /* For parallel scan, believe whatever ParallelTableScanDesc says. */
        if (scan->rs_base.rs_parallel->phs_syncscan)
            scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
        else
            scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;

        /*
         * If not rescanning, initialize the startblock.  Finding the actual
         * start location is done in table_block_parallelscan_startblock_init,
         * based on whether an alternative start location has been set with
         * heap_setscanlimits, or using the syncscan location, when syncscan
         * is enabled.
         */
        if (!keep_startblock)
            scan->rs_startblock = InvalidBlockNumber;
    }
    else
    {
        if (keep_startblock)
        {
            /*
             * When rescanning, we want to keep the previous startblock
             * setting, so that rewinding a cursor doesn't generate surprising
             * results.  Reset the active syncscan setting, though.
             */
            if (allow_sync && synchronize_seqscans)
                scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
            else
                scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
        }
        else if (allow_sync && synchronize_seqscans)
        {
            /* fresh scan with syncscan: join the pack wherever it is now */
            scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
            scan->rs_startblock = ss_get_location(scan->rs_base.rs_rd, scan->rs_nblocks);
        }
        else
        {
            scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
            scan->rs_startblock = 0;
        }
    }

    /* reset all per-scan state to "not started" */
    scan->rs_numblocks = InvalidBlockNumber;
    scan->rs_inited = false;
    scan->rs_ctup.t_data = NULL;
    ItemPointerSetInvalid(&scan->rs_ctup.t_self);
    scan->rs_cbuf = InvalidBuffer;
    scan->rs_cblock = InvalidBlockNumber;
    scan->rs_ntuples = 0;
    scan->rs_cindex = 0;

    /*
     * Initialize to ForwardScanDirection because it is most common and
     * because heap scans go forward before going backward (e.g. CURSORs).
     */
    scan->rs_dir = ForwardScanDirection;
    scan->rs_prefetch_block = InvalidBlockNumber;

    /* page-at-a-time fields are always invalid when not rs_inited */

    /*
     * copy the scan key, if appropriate
     */
    if (key != NULL && scan->rs_base.rs_nkeys > 0)
        memcpy(scan->rs_base.rs_key, key, scan->rs_base.rs_nkeys * sizeof(ScanKeyData));

    /*
     * Currently, we only have a stats counter for sequential heap scans (but
     * e.g for bitmap scans the underlying bitmap index scans will be counted,
     * and for sample scans we update stats for tuple fetches).
     */
    if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN)
        pgstat_count_heap_scan(scan->rs_base.rs_rd);
}
     492              : 
/*
 * heap_setscanlimits - restrict range of a heapscan
 *
 * startBlk is the page to start at
 * numBlks is number of pages to scan (InvalidBlockNumber means "all")
 *
 * Must be called before the scan has fetched anything (asserted below), and
 * is incompatible with synchronized scans, since those choose their own
 * start position.
 */
void
heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
{
    HeapScanDesc scan = (HeapScanDesc) sscan;

    Assert(!scan->rs_inited);    /* else too late to change */
    /* else rs_startblock is significant */
    Assert(!(scan->rs_base.rs_flags & SO_ALLOW_SYNC));

    /* Check startBlk is valid (but allow case of zero blocks...) */
    Assert(startBlk == 0 || startBlk < scan->rs_nblocks);

    scan->rs_startblock = startBlk;
    scan->rs_numblocks = numBlks;
}
     514              : 
     515              : /*
     516              :  * Per-tuple loop for heap_prepare_pagescan(). Pulled out so it can be called
     517              :  * multiple times, with constant arguments for all_visible,
     518              :  * check_serializable.
     519              :  */
     520              : pg_attribute_always_inline
     521              : static int
     522      3891536 : page_collect_tuples(HeapScanDesc scan, Snapshot snapshot,
     523              :                     Page page, Buffer buffer,
     524              :                     BlockNumber block, int lines,
     525              :                     bool all_visible, bool check_serializable)
     526              : {
     527      3891536 :     Oid         relid = RelationGetRelid(scan->rs_base.rs_rd);
     528      3891536 :     int         ntup = 0;
     529      3891536 :     int         nvis = 0;
     530              :     BatchMVCCState batchmvcc;
     531              : 
     532              :     /* page at a time should have been disabled otherwise */
     533              :     Assert(IsMVCCSnapshot(snapshot));
     534              : 
     535              :     /* first find all tuples on the page */
     536    213691236 :     for (OffsetNumber lineoff = FirstOffsetNumber; lineoff <= lines; lineoff++)
     537              :     {
     538    209799700 :         ItemId      lpp = PageGetItemId(page, lineoff);
     539              :         HeapTuple   tup;
     540              : 
     541    209799700 :         if (unlikely(!ItemIdIsNormal(lpp)))
     542     42499806 :             continue;
     543              : 
     544              :         /*
     545              :          * If the page is not all-visible or we need to check serializability,
     546              :          * maintain enough state to be able to refind the tuple efficiently,
     547              :          * without again first needing to fetch the item and then via that the
     548              :          * tuple.
     549              :          */
     550    167299894 :         if (!all_visible || check_serializable)
     551              :         {
     552     83370084 :             tup = &batchmvcc.tuples[ntup];
     553              : 
     554     83370084 :             tup->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
     555     83370084 :             tup->t_len = ItemIdGetLength(lpp);
     556     83370084 :             tup->t_tableOid = relid;
     557     83370084 :             ItemPointerSet(&(tup->t_self), block, lineoff);
     558              :         }
     559              : 
     560              :         /*
     561              :          * If the page is all visible, these fields otherwise won't be
     562              :          * populated in loop below.
     563              :          */
     564    167299894 :         if (all_visible)
     565              :         {
     566     83929810 :             if (check_serializable)
     567              :             {
     568            0 :                 batchmvcc.visible[ntup] = true;
     569              :             }
     570     83929810 :             scan->rs_vistuples[ntup] = lineoff;
     571              :         }
     572              : 
     573    167299894 :         ntup++;
     574              :     }
     575              : 
     576              :     Assert(ntup <= MaxHeapTuplesPerPage);
     577              : 
     578              :     /*
     579              :      * Unless the page is all visible, test visibility for all tuples one go.
     580              :      * That is considerably more efficient than calling
     581              :      * HeapTupleSatisfiesMVCC() one-by-one.
     582              :      */
     583      3891536 :     if (all_visible)
     584      1582810 :         nvis = ntup;
     585              :     else
     586      2308726 :         nvis = HeapTupleSatisfiesMVCCBatch(snapshot, buffer,
     587              :                                            ntup,
     588              :                                            &batchmvcc,
     589      2308726 :                                            scan->rs_vistuples);
     590              : 
     591              :     /*
     592              :      * So far we don't have batch API for testing serializabilty, so do so
     593              :      * one-by-one.
     594              :      */
     595      3891536 :     if (check_serializable)
     596              :     {
     597         2076 :         for (int i = 0; i < ntup; i++)
     598              :         {
     599         1454 :             HeapCheckForSerializableConflictOut(batchmvcc.visible[i],
     600              :                                                 scan->rs_base.rs_rd,
     601              :                                                 &batchmvcc.tuples[i],
     602              :                                                 buffer, snapshot);
     603              :         }
     604              :     }
     605              : 
     606      3891528 :     return nvis;
     607              : }
     608              : 
     609              : /*
     610              :  * heap_prepare_pagescan - Prepare current scan page to be scanned in pagemode
     611              :  *
     612              :  * Preparation currently consists of 1. prune the scan's rs_cbuf page, and 2.
     613              :  * fill the rs_vistuples[] array with the OffsetNumbers of visible tuples.
     614              :  */
     615              : void
     616      3891536 : heap_prepare_pagescan(TableScanDesc sscan)
     617              : {
     618      3891536 :     HeapScanDesc scan = (HeapScanDesc) sscan;
     619      3891536 :     Buffer      buffer = scan->rs_cbuf;
     620      3891536 :     BlockNumber block = scan->rs_cblock;
     621              :     Snapshot    snapshot;
     622              :     Page        page;
     623              :     int         lines;
     624              :     bool        all_visible;
     625              :     bool        check_serializable;
     626              : 
     627              :     Assert(BufferGetBlockNumber(buffer) == block);
     628              : 
     629              :     /* ensure we're not accidentally being used when not in pagemode */
     630              :     Assert(scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE);
     631      3891536 :     snapshot = scan->rs_base.rs_snapshot;
     632              : 
     633              :     /*
     634              :      * Prune and repair fragmentation for the whole page, if possible.
     635              :      */
     636      3891536 :     heap_page_prune_opt(scan->rs_base.rs_rd, buffer, &scan->rs_vmbuffer);
     637              : 
     638              :     /*
     639              :      * We must hold share lock on the buffer content while examining tuple
     640              :      * visibility.  Afterwards, however, the tuples we have found to be
     641              :      * visible are guaranteed good as long as we hold the buffer pin.
     642              :      */
     643      3891536 :     LockBuffer(buffer, BUFFER_LOCK_SHARE);
     644              : 
     645      3891536 :     page = BufferGetPage(buffer);
     646      3891536 :     lines = PageGetMaxOffsetNumber(page);
     647              : 
     648              :     /*
     649              :      * If the all-visible flag indicates that all tuples on the page are
     650              :      * visible to everyone, we can skip the per-tuple visibility tests.
     651              :      *
     652              :      * Note: In hot standby, a tuple that's already visible to all
     653              :      * transactions on the primary might still be invisible to a read-only
     654              :      * transaction in the standby. We partly handle this problem by tracking
     655              :      * the minimum xmin of visible tuples as the cut-off XID while marking a
     656              :      * page all-visible on the primary and WAL log that along with the
     657              :      * visibility map SET operation. In hot standby, we wait for (or abort)
     658              :      * all transactions that can potentially may not see one or more tuples on
     659              :      * the page. That's how index-only scans work fine in hot standby. A
     660              :      * crucial difference between index-only scans and heap scans is that the
     661              :      * index-only scan completely relies on the visibility map where as heap
     662              :      * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
     663              :      * the page-level flag can be trusted in the same way, because it might
     664              :      * get propagated somehow without being explicitly WAL-logged, e.g. via a
     665              :      * full page write. Until we can prove that beyond doubt, let's check each
     666              :      * tuple for visibility the hard way.
     667              :      */
     668      3891536 :     all_visible = PageIsAllVisible(page) && !snapshot->takenDuringRecovery;
     669              :     check_serializable =
     670      3891536 :         CheckForSerializableConflictOutNeeded(scan->rs_base.rs_rd, snapshot);
     671              : 
     672              :     /*
     673              :      * We call page_collect_tuples() with constant arguments, to get the
     674              :      * compiler to constant fold the constant arguments. Separate calls with
     675              :      * constant arguments, rather than variables, are needed on several
     676              :      * compilers to actually perform constant folding.
     677              :      */
     678      3891536 :     if (likely(all_visible))
     679              :     {
     680      1582810 :         if (likely(!check_serializable))
     681      1582810 :             scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
     682              :                                                    block, lines, true, false);
     683              :         else
     684            0 :             scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
     685              :                                                    block, lines, true, true);
     686              :     }
     687              :     else
     688              :     {
     689      2308726 :         if (likely(!check_serializable))
     690      2308096 :             scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
     691              :                                                    block, lines, false, false);
     692              :         else
     693          630 :             scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
     694              :                                                    block, lines, false, true);
     695              :     }
     696              : 
     697      3891528 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
     698      3891528 : }
     699              : 
     700              : /*
     701              :  * heap_fetch_next_buffer - read and pin the next block from MAIN_FORKNUM.
     702              :  *
     703              :  * Read the next block of the scan relation from the read stream and save it
     704              :  * in the scan descriptor.  It is already pinned.
     705              :  */
     706              : static inline void
     707      5119213 : heap_fetch_next_buffer(HeapScanDesc scan, ScanDirection dir)
     708              : {
     709              :     Assert(scan->rs_read_stream);
     710              : 
     711              :     /* release previous scan buffer, if any */
     712      5119213 :     if (BufferIsValid(scan->rs_cbuf))
     713              :     {
     714      3515276 :         ReleaseBuffer(scan->rs_cbuf);
     715      3515276 :         scan->rs_cbuf = InvalidBuffer;
     716              :     }
     717              : 
     718              :     /*
     719              :      * Be sure to check for interrupts at least once per page.  Checks at
     720              :      * higher code levels won't be able to stop a seqscan that encounters many
     721              :      * pages' worth of consecutive dead tuples.
     722              :      */
     723      5119213 :     CHECK_FOR_INTERRUPTS();
     724              : 
     725              :     /*
     726              :      * If the scan direction is changing, reset the prefetch block to the
     727              :      * current block. Otherwise, we will incorrectly prefetch the blocks
     728              :      * between the prefetch block and the current block again before
     729              :      * prefetching blocks in the new, correct scan direction.
     730              :      */
     731      5119210 :     if (unlikely(scan->rs_dir != dir))
     732              :     {
     733          102 :         scan->rs_prefetch_block = scan->rs_cblock;
     734          102 :         read_stream_reset(scan->rs_read_stream);
     735              :     }
     736              : 
     737      5119210 :     scan->rs_dir = dir;
     738              : 
     739      5119210 :     scan->rs_cbuf = read_stream_next_buffer(scan->rs_read_stream, NULL);
     740      5119183 :     if (BufferIsValid(scan->rs_cbuf))
     741      4001813 :         scan->rs_cblock = BufferGetBlockNumber(scan->rs_cbuf);
     742      5119183 : }
     743              : 
     744              : /*
     745              :  * heapgettup_initial_block - return the first BlockNumber to scan
     746              :  *
     747              :  * Returns InvalidBlockNumber when there are no blocks to scan.  This can
     748              :  * occur with empty tables and in parallel scans when parallel workers get all
     749              :  * of the pages before we can get a chance to get our first page.
     750              :  */
     751              : static pg_noinline BlockNumber
     752      1601698 : heapgettup_initial_block(HeapScanDesc scan, ScanDirection dir)
     753              : {
     754              :     Assert(!scan->rs_inited);
     755              :     Assert(scan->rs_base.rs_parallel == NULL);
     756              : 
     757              :     /* When there are no pages to scan, return InvalidBlockNumber */
     758      1601698 :     if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
     759       757573 :         return InvalidBlockNumber;
     760              : 
     761       844125 :     if (ScanDirectionIsForward(dir))
     762              :     {
     763       844083 :         return scan->rs_startblock;
     764              :     }
     765              :     else
     766              :     {
     767              :         /*
     768              :          * Disable reporting to syncscan logic in a backwards scan; it's not
     769              :          * very likely anyone else is doing the same thing at the same time,
     770              :          * and much more likely that we'll just bollix things for forward
     771              :          * scanners.
     772              :          */
     773           42 :         scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
     774              : 
     775              :         /*
     776              :          * Start from last page of the scan.  Ensure we take into account
     777              :          * rs_numblocks if it's been adjusted by heap_setscanlimits().
     778              :          */
     779           42 :         if (scan->rs_numblocks != InvalidBlockNumber)
     780            4 :             return (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
     781              : 
     782           38 :         if (scan->rs_startblock > 0)
     783            0 :             return scan->rs_startblock - 1;
     784              : 
     785           38 :         return scan->rs_nblocks - 1;
     786              :     }
     787              : }
     788              : 
     789              : 
     790              : /*
     791              :  * heapgettup_start_page - helper function for heapgettup()
     792              :  *
     793              :  * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
     794              :  * to the number of tuples on this page.  Also set *lineoff to the first
     795              :  * offset to scan with forward scans getting the first offset and backward
     796              :  * getting the final offset on the page.
     797              :  */
     798              : static Page
     799       115963 : heapgettup_start_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
     800              :                       OffsetNumber *lineoff)
     801              : {
     802              :     Page        page;
     803              : 
     804              :     Assert(scan->rs_inited);
     805              :     Assert(BufferIsValid(scan->rs_cbuf));
     806              : 
     807              :     /* Caller is responsible for ensuring buffer is locked if needed */
     808       115963 :     page = BufferGetPage(scan->rs_cbuf);
     809              : 
     810       115963 :     *linesleft = PageGetMaxOffsetNumber(page) - FirstOffsetNumber + 1;
     811              : 
     812       115963 :     if (ScanDirectionIsForward(dir))
     813       115963 :         *lineoff = FirstOffsetNumber;
     814              :     else
     815            0 :         *lineoff = (OffsetNumber) (*linesleft);
     816              : 
     817              :     /* lineoff now references the physically previous or next tid */
     818       115963 :     return page;
     819              : }
     820              : 
     821              : 
     822              : /*
     823              :  * heapgettup_continue_page - helper function for heapgettup()
     824              :  *
     825              :  * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
     826              :  * to the number of tuples left to scan on this page.  Also set *lineoff to
     827              :  * the next offset to scan according to the ScanDirection in 'dir'.
     828              :  */
     829              : static inline Page
     830      9235099 : heapgettup_continue_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
     831              :                          OffsetNumber *lineoff)
     832              : {
     833              :     Page        page;
     834              : 
     835              :     Assert(scan->rs_inited);
     836              :     Assert(BufferIsValid(scan->rs_cbuf));
     837              : 
     838              :     /* Caller is responsible for ensuring buffer is locked if needed */
     839      9235099 :     page = BufferGetPage(scan->rs_cbuf);
     840              : 
     841      9235099 :     if (ScanDirectionIsForward(dir))
     842              :     {
     843      9235099 :         *lineoff = OffsetNumberNext(scan->rs_coffset);
     844      9235099 :         *linesleft = PageGetMaxOffsetNumber(page) - (*lineoff) + 1;
     845              :     }
     846              :     else
     847              :     {
     848              :         /*
     849              :          * The previous returned tuple may have been vacuumed since the
     850              :          * previous scan when we use a non-MVCC snapshot, so we must
     851              :          * re-establish the lineoff <= PageGetMaxOffsetNumber(page) invariant
     852              :          */
     853            0 :         *lineoff = Min(PageGetMaxOffsetNumber(page), OffsetNumberPrev(scan->rs_coffset));
     854            0 :         *linesleft = *lineoff;
     855              :     }
     856              : 
     857              :     /* lineoff now references the physically previous or next tid */
     858      9235099 :     return page;
     859              : }
     860              : 
     861              : /*
     862              :  * heapgettup_advance_block - helper for heap_fetch_next_buffer()
     863              :  *
     864              :  * Given the current block number, the scan direction, and various information
     865              :  * contained in the scan descriptor, calculate the BlockNumber to scan next
     866              :  * and return it.  If there are no further blocks to scan, return
     867              :  * InvalidBlockNumber to indicate this fact to the caller.
     868              :  *
     869              :  * This should not be called to determine the initial block number -- only for
     870              :  * subsequent blocks.
     871              :  *
     872              :  * This also adjusts rs_numblocks when a limit has been imposed by
     873              :  * heap_setscanlimits().
     874              :  */
     875              : static inline BlockNumber
     876      3896263 : heapgettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection dir)
     877              : {
     878              :     Assert(scan->rs_base.rs_parallel == NULL);
     879              : 
     880      3896263 :     if (likely(ScanDirectionIsForward(dir)))
     881              :     {
     882      3896185 :         block++;
     883              : 
     884              :         /* wrap back to the start of the heap */
     885      3896185 :         if (block >= scan->rs_nblocks)
     886       729956 :             block = 0;
     887              : 
     888              :         /*
     889              :          * Report our new scan position for synchronization purposes. We don't
     890              :          * do that when moving backwards, however. That would just mess up any
     891              :          * other forward-moving scanners.
     892              :          *
     893              :          * Note: we do this before checking for end of scan so that the final
     894              :          * state of the position hint is back at the start of the rel.  That's
     895              :          * not strictly necessary, but otherwise when you run the same query
     896              :          * multiple times the starting position would shift a little bit
     897              :          * backwards on every invocation, which is confusing. We don't
     898              :          * guarantee any specific ordering in general, though.
     899              :          */
     900      3896185 :         if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
     901        11629 :             ss_report_location(scan->rs_base.rs_rd, block);
     902              : 
     903              :         /* we're done if we're back at where we started */
     904      3896185 :         if (block == scan->rs_startblock)
     905       729906 :             return InvalidBlockNumber;
     906              : 
     907              :         /* check if the limit imposed by heap_setscanlimits() is met */
     908      3166279 :         if (scan->rs_numblocks != InvalidBlockNumber)
     909              :         {
     910         2822 :             if (--scan->rs_numblocks == 0)
     911         1590 :                 return InvalidBlockNumber;
     912              :         }
     913              : 
     914      3164689 :         return block;
     915              :     }
     916              :     else
     917              :     {
     918              :         /* we're done if the last block is the start position */
     919           78 :         if (block == scan->rs_startblock)
     920           78 :             return InvalidBlockNumber;
     921              : 
     922              :         /* check if the limit imposed by heap_setscanlimits() is met */
     923            0 :         if (scan->rs_numblocks != InvalidBlockNumber)
     924              :         {
     925            0 :             if (--scan->rs_numblocks == 0)
     926            0 :                 return InvalidBlockNumber;
     927              :         }
     928              : 
     929              :         /* wrap to the end of the heap when the last page was page 0 */
     930            0 :         if (block == 0)
     931            0 :             block = scan->rs_nblocks;
     932              : 
     933            0 :         block--;
     934              : 
     935            0 :         return block;
     936              :     }
     937              : }
     938              : 
     939              : /* ----------------
     940              :  *      heapgettup - fetch next heap tuple
     941              :  *
     942              :  *      Initialize the scan if not already done; then advance to the next
     943              :  *      tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
     944              :  *      or set scan->rs_ctup.t_data = NULL if no more tuples.
     945              :  *
     946              :  * Note: the reason nkeys/key are passed separately, even though they are
     947              :  * kept in the scan descriptor, is that the caller may not want us to check
     948              :  * the scankeys.
     949              :  *
     950              :  * Note: when we fall off the end of the scan in either direction, we
     951              :  * reset rs_inited.  This means that a further request with the same
     952              :  * scan direction will restart the scan, which is a bit odd, but a
     953              :  * request with the opposite scan direction will start a fresh scan
     954              :  * in the proper direction.  The latter is required behavior for cursors,
     955              :  * while the former case is generally undefined behavior in Postgres
     956              :  * so we don't care too much.
     957              :  * ----------------
     958              :  */
     959              : static void
     960      9261371 : heapgettup(HeapScanDesc scan,
     961              :            ScanDirection dir,
     962              :            int nkeys,
     963              :            ScanKey key)
     964              : {
     965      9261371 :     HeapTuple   tuple = &(scan->rs_ctup);
     966              :     Page        page;
     967              :     OffsetNumber lineoff;
     968              :     int         linesleft;
     969              : 
     970      9261371 :     if (likely(scan->rs_inited))
     971              :     {
     972              :         /* continue from previously returned page/tuple */
     973      9235099 :         LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
     974      9235099 :         page = heapgettup_continue_page(scan, dir, &linesleft, &lineoff);
     975      9235099 :         goto continue_page;
     976              :     }
     977              : 
     978              :     /*
     979              :      * advance the scan until we find a qualifying tuple or run out of stuff
     980              :      * to scan
     981              :      */
     982              :     while (true)
     983              :     {
     984       141368 :         heap_fetch_next_buffer(scan, dir);
     985              : 
     986              :         /* did we run out of blocks to scan? */
     987       141368 :         if (!BufferIsValid(scan->rs_cbuf))
     988        25405 :             break;
     989              : 
     990              :         Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);
     991              : 
     992       115963 :         LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
     993       115963 :         page = heapgettup_start_page(scan, dir, &linesleft, &lineoff);
     994      9351062 : continue_page:
     995              : 
     996              :         /*
     997              :          * Only continue scanning the page while we have lines left.
     998              :          *
     999              :          * Note that this protects us from accessing line pointers past
    1000              :          * PageGetMaxOffsetNumber(); both for forward scans when we resume the
    1001              :          * table scan, and for when we start scanning a new page.
    1002              :          */
    1003      9420535 :         for (; linesleft > 0; linesleft--, lineoff += dir)
    1004              :         {
    1005              :             bool        visible;
    1006      9305439 :             ItemId      lpp = PageGetItemId(page, lineoff);
    1007              : 
    1008      9305439 :             if (!ItemIdIsNormal(lpp))
    1009        47437 :                 continue;
    1010              : 
    1011      9258002 :             tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
    1012      9258002 :             tuple->t_len = ItemIdGetLength(lpp);
    1013      9258002 :             ItemPointerSet(&(tuple->t_self), scan->rs_cblock, lineoff);
    1014              : 
    1015      9258002 :             visible = HeapTupleSatisfiesVisibility(tuple,
    1016              :                                                    scan->rs_base.rs_snapshot,
    1017              :                                                    scan->rs_cbuf);
    1018              : 
    1019      9258002 :             HeapCheckForSerializableConflictOut(visible, scan->rs_base.rs_rd,
    1020              :                                                 tuple, scan->rs_cbuf,
    1021              :                                                 scan->rs_base.rs_snapshot);
    1022              : 
    1023              :             /* skip tuples not visible to this snapshot */
    1024      9258002 :             if (!visible)
    1025         7713 :                 continue;
    1026              : 
    1027              :             /* skip any tuples that don't match the scan key */
    1028      9250289 :             if (key != NULL &&
    1029        15039 :                 !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
    1030              :                              nkeys, key))
    1031        14323 :                 continue;
    1032              : 
    1033      9235966 :             LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
    1034      9235966 :             scan->rs_coffset = lineoff;
    1035      9235966 :             return;
    1036              :         }
    1037              : 
    1038              :         /*
    1039              :          * if we get here, it means we've exhausted the items on this page and
    1040              :          * it's time to move to the next.
    1041              :          */
    1042       115096 :         LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
    1043              :     }
    1044              : 
    1045              :     /* end of scan */
    1046        25405 :     if (BufferIsValid(scan->rs_cbuf))
    1047            0 :         ReleaseBuffer(scan->rs_cbuf);
    1048              : 
    1049        25405 :     scan->rs_cbuf = InvalidBuffer;
    1050        25405 :     scan->rs_cblock = InvalidBlockNumber;
    1051        25405 :     scan->rs_prefetch_block = InvalidBlockNumber;
    1052        25405 :     tuple->t_data = NULL;
    1053        25405 :     scan->rs_inited = false;
    1054              : }
    1055              : 
    1056              : /* ----------------
    1057              :  *      heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
    1058              :  *
    1059              :  *      Same API as heapgettup, but used in page-at-a-time mode
    1060              :  *
    1061              :  * The internal logic is much the same as heapgettup's too, but there are some
    1062              :  * differences: we do not take the buffer content lock (that only needs to
    1063              :  * happen inside heap_prepare_pagescan), and we iterate through just the
    1064              :  * tuples listed in rs_vistuples[] rather than all tuples on the page.  Notice
    1065              :  * that lineindex is 0-based, where the corresponding loop variable lineoff in
    1066              :  * heapgettup is 1-based.
    1067              :  * ----------------
    1068              :  */
static void
heapgettup_pagemode(HeapScanDesc scan,
                    ScanDirection dir,
                    int nkeys,
                    ScanKey key)
{
    HeapTuple   tuple = &(scan->rs_ctup);
    Page        page;
    uint32      lineindex;      /* 0-based index into rs_vistuples[] */
    uint32      linesleft;      /* visible tuples not yet examined on page */

    if (likely(scan->rs_inited))
    {
        /* continue from previously returned page/tuple */
        page = BufferGetPage(scan->rs_cbuf);

        /*
         * Step to the adjacent rs_vistuples[] entry; dir serves as a +1/-1
         * increment for forward/backward scans (see the lineindex += dir
         * loop update below, which relies on the same convention).
         */
        lineindex = scan->rs_cindex + dir;
        if (ScanDirectionIsForward(dir))
            linesleft = scan->rs_ntuples - lineindex;
        else
            linesleft = scan->rs_cindex;
        /* lineindex now references the next or previous visible tid */

        goto continue_page;
    }

    /*
     * advance the scan until we find a qualifying tuple or run out of stuff
     * to scan
     */
    while (true)
    {
        heap_fetch_next_buffer(scan, dir);

        /* did we run out of blocks to scan? */
        if (!BufferIsValid(scan->rs_cbuf))
            break;

        Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);

        /* prune the page and determine visible tuple offsets */
        heap_prepare_pagescan((TableScanDesc) scan);
        page = BufferGetPage(scan->rs_cbuf);
        linesleft = scan->rs_ntuples;
        lineindex = ScanDirectionIsForward(dir) ? 0 : linesleft - 1;

        /* block is the same for all tuples, set it once outside the loop */
        ItemPointerSetBlockNumber(&tuple->t_self, scan->rs_cblock);

        /* lineindex now references the next or previous visible tid */
continue_page:

        /*
         * Walk the visible-tuple array in scan direction.  lineindex is
         * unsigned, so a backward step past entry 0 would wrap around, but
         * linesleft hits zero on the same iteration and terminates the loop
         * before the wrapped index is ever used.
         */
        for (; linesleft > 0; linesleft--, lineindex += dir)
        {
            ItemId      lpp;
            OffsetNumber lineoff;

            Assert(lineindex < scan->rs_ntuples);
            lineoff = scan->rs_vistuples[lineindex];
            lpp = PageGetItemId(page, lineoff);
            Assert(ItemIdIsNormal(lpp));

            tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
            tuple->t_len = ItemIdGetLength(lpp);
            ItemPointerSetOffsetNumber(&tuple->t_self, lineoff);

            /* skip any tuples that don't match the scan key */
            if (key != NULL &&
                !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
                             nkeys, key))
                continue;

            /* found a match: remember its position and return */
            scan->rs_cindex = lineindex;
            return;
        }
    }

    /* end of scan: release resources and mark the scan uninitialized */
    if (BufferIsValid(scan->rs_cbuf))
        ReleaseBuffer(scan->rs_cbuf);
    scan->rs_cbuf = InvalidBuffer;
    scan->rs_cblock = InvalidBlockNumber;
    scan->rs_prefetch_block = InvalidBlockNumber;
    tuple->t_data = NULL;
    scan->rs_inited = false;
}
    1155              : 
    1156              : 
    1157              : /* ----------------------------------------------------------------
    1158              :  *                   heap access method interface
    1159              :  * ----------------------------------------------------------------
    1160              :  */
    1161              : 
    1162              : 
/*
 * heap_beginscan - begin a scan of a heap relation
 *
 * Allocates and initializes a heap scan descriptor (a BitmapHeapScanDescData
 * for bitmap scans), pins the relcache entry, validates the snapshot, takes
 * a relation-level predicate lock for seqscans/sample scans, and sets up a
 * read stream for sequential, TID range, and bitmap scans.  The returned
 * descriptor must eventually be passed to heap_endscan().
 */
TableScanDesc
heap_beginscan(Relation relation, Snapshot snapshot,
               int nkeys, ScanKey key,
               ParallelTableScanDesc parallel_scan,
               uint32 flags)
{
    HeapScanDesc scan;

    /*
     * increment relation ref count while scanning relation
     *
     * This is just to make really sure the relcache entry won't go away while
     * the scan has a pointer to it.  Caller should be holding the rel open
     * anyway, so this is redundant in all normal scenarios...
     */
    RelationIncrementReferenceCount(relation);

    /*
     * allocate and initialize scan descriptor
     */
    if (flags & SO_TYPE_BITMAPSCAN)
    {
        BitmapHeapScanDesc bscan = palloc_object(BitmapHeapScanDescData);

        /*
         * Bitmap Heap scans do not have any fields that a normal Heap Scan
         * does not have, so no special initializations required here.
         */
        scan = (HeapScanDesc) bscan;
    }
    else
        scan = (HeapScanDesc) palloc_object(HeapScanDescData);

    scan->rs_base.rs_rd = relation;
    scan->rs_base.rs_snapshot = snapshot;
    scan->rs_base.rs_nkeys = nkeys;
    scan->rs_base.rs_flags = flags;
    scan->rs_base.rs_parallel = parallel_scan;
    scan->rs_strategy = NULL;    /* set in initscan */
    scan->rs_cbuf = InvalidBuffer;

    /*
     * Disable page-at-a-time mode if it's not a MVCC-safe snapshot.
     */
    if (!(snapshot && IsMVCCSnapshot(snapshot)))
        scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;

    /* Check that a historic snapshot is not used for non-catalog tables */
    if (snapshot &&
        IsHistoricMVCCSnapshot(snapshot) &&
        !RelationIsAccessibleInLogicalDecoding(relation))
    {
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
                 errmsg("cannot query non-catalog table \"%s\" during logical decoding",
                        RelationGetRelationName(relation))));
    }

    /*
     * For seqscan and sample scans in a serializable transaction, acquire a
     * predicate lock on the entire relation. This is required not only to
     * lock all the matching tuples, but also to conflict with new insertions
     * into the table. In an indexscan, we take page locks on the index pages
     * covering the range specified in the scan qual, but in a heap scan there
     * is nothing more fine-grained to lock. A bitmap scan is a different
     * story, there we have already scanned the index and locked the index
     * pages covering the predicate. But in that case we still have to lock
     * any matching heap tuples. For sample scan we could optimize the locking
     * to be at least page-level granularity, but we'd need to add per-tuple
     * locking for that.
     */
    if (scan->rs_base.rs_flags & (SO_TYPE_SEQSCAN | SO_TYPE_SAMPLESCAN))
    {
        /*
         * Ensure a missing snapshot is noticed reliably, even if the
         * isolation mode means predicate locking isn't performed (and
         * therefore the snapshot isn't used here).
         */
        Assert(snapshot);
        PredicateLockRelation(relation, snapshot);
    }

    /* we only need to set this up once */
    scan->rs_ctup.t_tableOid = RelationGetRelid(relation);

    /*
     * Allocate memory to keep track of page allocation for parallel workers
     * when doing a parallel scan.
     */
    if (parallel_scan != NULL)
        scan->rs_parallelworkerdata = palloc_object(ParallelBlockTableScanWorkerData);
    else
        scan->rs_parallelworkerdata = NULL;

    /*
     * we do this here instead of in initscan() because heap_rescan also calls
     * initscan() and we don't want to allocate memory again
     */
    if (nkeys > 0)
        scan->rs_base.rs_key = palloc_array(ScanKeyData, nkeys);
    else
        scan->rs_base.rs_key = NULL;

    initscan(scan, key, false);

    scan->rs_read_stream = NULL;

    /*
     * Set up a read stream for sequential scans and TID range scans. This
     * should be done after initscan() because initscan() allocates the
     * BufferAccessStrategy object passed to the read stream API.
     */
    if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN ||
        scan->rs_base.rs_flags & SO_TYPE_TIDRANGESCAN)
    {
        ReadStreamBlockNumberCB cb;

        if (scan->rs_base.rs_parallel)
            cb = heap_scan_stream_read_next_parallel;
        else
            cb = heap_scan_stream_read_next_serial;

        /* ---
         * It is safe to use batchmode as the only locks taken by `cb`
         * are never taken while waiting for IO:
         * - SyncScanLock is used in the non-parallel case
         * - in the parallel case, only spinlocks and atomics are used
         * ---
         */
        scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_SEQUENTIAL |
                                                          READ_STREAM_USE_BATCHING,
                                                          scan->rs_strategy,
                                                          scan->rs_base.rs_rd,
                                                          MAIN_FORKNUM,
                                                          cb,
                                                          scan,
                                                          0);
    }
    else if (scan->rs_base.rs_flags & SO_TYPE_BITMAPSCAN)
    {
        /* per-buffer private data carries the bitmap iterate result */
        scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_DEFAULT |
                                                          READ_STREAM_USE_BATCHING,
                                                          scan->rs_strategy,
                                                          scan->rs_base.rs_rd,
                                                          MAIN_FORKNUM,
                                                          bitmapheap_stream_read_next,
                                                          scan,
                                                          sizeof(TBMIterateResult));
    }

    scan->rs_vmbuffer = InvalidBuffer;

    return (TableScanDesc) scan;
}
    1317              : 
    1318              : void
    1319       864615 : heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
    1320              :             bool allow_strat, bool allow_sync, bool allow_pagemode)
    1321              : {
    1322       864615 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1323              : 
    1324       864615 :     if (set_params)
    1325              :     {
    1326           19 :         if (allow_strat)
    1327           19 :             scan->rs_base.rs_flags |= SO_ALLOW_STRAT;
    1328              :         else
    1329            0 :             scan->rs_base.rs_flags &= ~SO_ALLOW_STRAT;
    1330              : 
    1331           19 :         if (allow_sync)
    1332            8 :             scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
    1333              :         else
    1334           11 :             scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
    1335              : 
    1336           19 :         if (allow_pagemode && scan->rs_base.rs_snapshot &&
    1337           19 :             IsMVCCSnapshot(scan->rs_base.rs_snapshot))
    1338           19 :             scan->rs_base.rs_flags |= SO_ALLOW_PAGEMODE;
    1339              :         else
    1340            0 :             scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
    1341              :     }
    1342              : 
    1343              :     /*
    1344              :      * unpin scan buffers
    1345              :      */
    1346       864615 :     if (BufferIsValid(scan->rs_cbuf))
    1347              :     {
    1348         3045 :         ReleaseBuffer(scan->rs_cbuf);
    1349         3045 :         scan->rs_cbuf = InvalidBuffer;
    1350              :     }
    1351              : 
    1352       864615 :     if (BufferIsValid(scan->rs_vmbuffer))
    1353              :     {
    1354           12 :         ReleaseBuffer(scan->rs_vmbuffer);
    1355           12 :         scan->rs_vmbuffer = InvalidBuffer;
    1356              :     }
    1357              : 
    1358              :     /*
    1359              :      * SO_TYPE_BITMAPSCAN would be cleaned up here, but it does not hold any
    1360              :      * additional data vs a normal HeapScan
    1361              :      */
    1362              : 
    1363              :     /*
    1364              :      * The read stream is reset on rescan. This must be done before
    1365              :      * initscan(), as some state referred to by read_stream_reset() is reset
    1366              :      * in initscan().
    1367              :      */
    1368       864615 :     if (scan->rs_read_stream)
    1369       864592 :         read_stream_reset(scan->rs_read_stream);
    1370              : 
    1371              :     /*
    1372              :      * reinitialize scan descriptor
    1373              :      */
    1374       864615 :     initscan(scan, key, true);
    1375       864615 : }
    1376              : 
    1377              : void
    1378       763757 : heap_endscan(TableScanDesc sscan)
    1379              : {
    1380       763757 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1381              : 
    1382              :     /* Note: no locking manipulations needed */
    1383              : 
    1384              :     /*
    1385              :      * unpin scan buffers
    1386              :      */
    1387       763757 :     if (BufferIsValid(scan->rs_cbuf))
    1388       481320 :         ReleaseBuffer(scan->rs_cbuf);
    1389              : 
    1390       763757 :     if (BufferIsValid(scan->rs_vmbuffer))
    1391         1473 :         ReleaseBuffer(scan->rs_vmbuffer);
    1392              : 
    1393              :     /*
    1394              :      * Must free the read stream before freeing the BufferAccessStrategy.
    1395              :      */
    1396       763757 :     if (scan->rs_read_stream)
    1397       752752 :         read_stream_end(scan->rs_read_stream);
    1398              : 
    1399              :     /*
    1400              :      * decrement relation reference count and free scan descriptor storage
    1401              :      */
    1402       763757 :     RelationDecrementReferenceCount(scan->rs_base.rs_rd);
    1403              : 
    1404       763757 :     if (scan->rs_base.rs_key)
    1405       252212 :         pfree(scan->rs_base.rs_key);
    1406              : 
    1407       763757 :     if (scan->rs_strategy != NULL)
    1408        12761 :         FreeAccessStrategy(scan->rs_strategy);
    1409              : 
    1410       763757 :     if (scan->rs_parallelworkerdata != NULL)
    1411         2865 :         pfree(scan->rs_parallelworkerdata);
    1412              : 
    1413       763757 :     if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
    1414        47653 :         UnregisterSnapshot(scan->rs_base.rs_snapshot);
    1415              : 
    1416       763757 :     pfree(scan);
    1417       763757 : }
    1418              : 
    1419              : HeapTuple
    1420     11583927 : heap_getnext(TableScanDesc sscan, ScanDirection direction)
    1421              : {
    1422     11583927 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1423              : 
    1424              :     /*
    1425              :      * This is still widely used directly, without going through table AM, so
    1426              :      * add a safety check.  It's possible we should, at a later point,
    1427              :      * downgrade this to an assert. The reason for checking the AM routine,
    1428              :      * rather than the AM oid, is that this allows to write regression tests
    1429              :      * that create another AM reusing the heap handler.
    1430              :      */
    1431     11583927 :     if (unlikely(sscan->rs_rd->rd_tableam != GetHeapamTableAmRoutine()))
    1432            0 :         ereport(ERROR,
    1433              :                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    1434              :                  errmsg_internal("only heap AM is supported")));
    1435              : 
    1436              :     /* Note: no locking manipulations needed */
    1437              : 
    1438     11583927 :     if (scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
    1439      2913349 :         heapgettup_pagemode(scan, direction,
    1440      2913349 :                             scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
    1441              :     else
    1442      8670578 :         heapgettup(scan, direction,
    1443      8670578 :                    scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
    1444              : 
    1445     11583926 :     if (scan->rs_ctup.t_data == NULL)
    1446        78849 :         return NULL;
    1447              : 
    1448              :     /*
    1449              :      * if we get here it means we have a new current scan tuple, so point to
    1450              :      * the proper return buffer and return the tuple.
    1451              :      */
    1452              : 
    1453     11505077 :     pgstat_count_heap_getnext(scan->rs_base.rs_rd);
    1454              : 
    1455     11505077 :     return &scan->rs_ctup;
    1456              : }
    1457              : 
    1458              : bool
    1459     68397130 : heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
    1460              : {
    1461     68397130 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1462              : 
    1463              :     /* Note: no locking manipulations needed */
    1464              : 
    1465     68397130 :     if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
    1466     67806337 :         heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
    1467              :     else
    1468       590793 :         heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
    1469              : 
    1470     68397103 :     if (scan->rs_ctup.t_data == NULL)
    1471              :     {
    1472      1038383 :         ExecClearTuple(slot);
    1473      1038383 :         return false;
    1474              :     }
    1475              : 
    1476              :     /*
    1477              :      * if we get here it means we have a new current scan tuple, so point to
    1478              :      * the proper return buffer and return the tuple.
    1479              :      */
    1480              : 
    1481     67358720 :     pgstat_count_heap_getnext(scan->rs_base.rs_rd);
    1482              : 
    1483     67358720 :     ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
    1484              :                              scan->rs_cbuf);
    1485     67358720 :     return true;
    1486              : }
    1487              : 
    1488              : void
    1489         1378 : heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
    1490              :                   ItemPointer maxtid)
    1491              : {
    1492         1378 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1493              :     BlockNumber startBlk;
    1494              :     BlockNumber numBlks;
    1495              :     ItemPointerData highestItem;
    1496              :     ItemPointerData lowestItem;
    1497              : 
    1498              :     /*
    1499              :      * For relations without any pages, we can simply leave the TID range
    1500              :      * unset.  There will be no tuples to scan, therefore no tuples outside
    1501              :      * the given TID range.
    1502              :      */
    1503         1378 :     if (scan->rs_nblocks == 0)
    1504           32 :         return;
    1505              : 
    1506              :     /*
    1507              :      * Set up some ItemPointers which point to the first and last possible
    1508              :      * tuples in the heap.
    1509              :      */
    1510         1370 :     ItemPointerSet(&highestItem, scan->rs_nblocks - 1, MaxOffsetNumber);
    1511         1370 :     ItemPointerSet(&lowestItem, 0, FirstOffsetNumber);
    1512              : 
    1513              :     /*
    1514              :      * If the given maximum TID is below the highest possible TID in the
    1515              :      * relation, then restrict the range to that, otherwise we scan to the end
    1516              :      * of the relation.
    1517              :      */
    1518         1370 :     if (ItemPointerCompare(maxtid, &highestItem) < 0)
    1519          172 :         ItemPointerCopy(maxtid, &highestItem);
    1520              : 
    1521              :     /*
    1522              :      * If the given minimum TID is above the lowest possible TID in the
    1523              :      * relation, then restrict the range to only scan for TIDs above that.
    1524              :      */
    1525         1370 :     if (ItemPointerCompare(mintid, &lowestItem) > 0)
    1526         1214 :         ItemPointerCopy(mintid, &lowestItem);
    1527              : 
    1528              :     /*
    1529              :      * Check for an empty range and protect from would be negative results
    1530              :      * from the numBlks calculation below.
    1531              :      */
    1532         1370 :     if (ItemPointerCompare(&highestItem, &lowestItem) < 0)
    1533              :     {
    1534              :         /* Set an empty range of blocks to scan */
    1535           24 :         heap_setscanlimits(sscan, 0, 0);
    1536           24 :         return;
    1537              :     }
    1538              : 
    1539              :     /*
    1540              :      * Calculate the first block and the number of blocks we must scan. We
    1541              :      * could be more aggressive here and perform some more validation to try
    1542              :      * and further narrow the scope of blocks to scan by checking if the
    1543              :      * lowestItem has an offset above MaxOffsetNumber.  In this case, we could
    1544              :      * advance startBlk by one.  Likewise, if highestItem has an offset of 0
    1545              :      * we could scan one fewer blocks.  However, such an optimization does not
    1546              :      * seem worth troubling over, currently.
    1547              :      */
    1548         1346 :     startBlk = ItemPointerGetBlockNumberNoCheck(&lowestItem);
    1549              : 
    1550         1346 :     numBlks = ItemPointerGetBlockNumberNoCheck(&highestItem) -
    1551         1346 :         ItemPointerGetBlockNumberNoCheck(&lowestItem) + 1;
    1552              : 
    1553              :     /* Set the start block and number of blocks to scan */
    1554         1346 :     heap_setscanlimits(sscan, startBlk, numBlks);
    1555              : 
    1556              :     /* Finally, set the TID range in sscan */
    1557         1346 :     ItemPointerCopy(&lowestItem, &sscan->st.tidrange.rs_mintid);
    1558         1346 :     ItemPointerCopy(&highestItem, &sscan->st.tidrange.rs_maxtid);
    1559              : }
    1560              : 
    1561              : bool
    1562         6516 : heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
    1563              :                           TupleTableSlot *slot)
    1564              : {
    1565         6516 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1566         6516 :     ItemPointer mintid = &sscan->st.tidrange.rs_mintid;
    1567         6516 :     ItemPointer maxtid = &sscan->st.tidrange.rs_maxtid;
    1568              : 
    1569              :     /* Note: no locking manipulations needed */
    1570              :     for (;;)
    1571              :     {
    1572         6640 :         if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
    1573         6640 :             heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
    1574              :         else
    1575            0 :             heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
    1576              : 
    1577         6630 :         if (scan->rs_ctup.t_data == NULL)
    1578              :         {
    1579          138 :             ExecClearTuple(slot);
    1580          138 :             return false;
    1581              :         }
    1582              : 
    1583              :         /*
    1584              :          * heap_set_tidrange will have used heap_setscanlimits to limit the
    1585              :          * range of pages we scan to only ones that can contain the TID range
    1586              :          * we're scanning for.  Here we must filter out any tuples from these
    1587              :          * pages that are outside of that range.
    1588              :          */
    1589         6492 :         if (ItemPointerCompare(&scan->rs_ctup.t_self, mintid) < 0)
    1590              :         {
    1591          124 :             ExecClearTuple(slot);
    1592              : 
    1593              :             /*
    1594              :              * When scanning backwards, the TIDs will be in descending order.
    1595              :              * Future tuples in this direction will be lower still, so we can
    1596              :              * just return false to indicate there will be no more tuples.
    1597              :              */
    1598          124 :             if (ScanDirectionIsBackward(direction))
    1599            0 :                 return false;
    1600              : 
    1601          124 :             continue;
    1602              :         }
    1603              : 
    1604              :         /*
    1605              :          * Likewise for the final page, we must filter out TIDs greater than
    1606              :          * maxtid.
    1607              :          */
    1608         6368 :         if (ItemPointerCompare(&scan->rs_ctup.t_self, maxtid) > 0)
    1609              :         {
    1610           74 :             ExecClearTuple(slot);
    1611              : 
    1612              :             /*
    1613              :              * When scanning forward, the TIDs will be in ascending order.
    1614              :              * Future tuples in this direction will be higher still, so we can
    1615              :              * just return false to indicate there will be no more tuples.
    1616              :              */
    1617           74 :             if (ScanDirectionIsForward(direction))
    1618           74 :                 return false;
    1619            0 :             continue;
    1620              :         }
    1621              : 
    1622         6294 :         break;
    1623              :     }
    1624              : 
    1625              :     /*
    1626              :      * if we get here it means we have a new current scan tuple, so point to
    1627              :      * the proper return buffer and return the tuple.
    1628              :      */
    1629         6294 :     pgstat_count_heap_getnext(scan->rs_base.rs_rd);
    1630              : 
    1631         6294 :     ExecStoreBufferHeapTuple(&scan->rs_ctup, slot, scan->rs_cbuf);
    1632         6294 :     return true;
    1633              : }
    1634              : 
    1635              : /*
    1636              :  *  heap_fetch      - retrieve tuple with given tid
    1637              :  *
    1638              :  * On entry, tuple->t_self is the TID to fetch.  We pin the buffer holding
    1639              :  * the tuple, fill in the remaining fields of *tuple, and check the tuple
    1640              :  * against the specified snapshot.
    1641              :  *
    1642              :  * If successful (tuple found and passes snapshot time qual), then *userbuf
    1643              :  * is set to the buffer holding the tuple and true is returned.  The caller
    1644              :  * must unpin the buffer when done with the tuple.
    1645              :  *
    1646              :  * If the tuple is not found (ie, item number references a deleted slot),
    1647              :  * then tuple->t_data is set to NULL, *userbuf is set to InvalidBuffer,
    1648              :  * and false is returned.
    1649              :  *
    1650              :  * If the tuple is found but fails the time qual check, then the behavior
    1651              :  * depends on the keep_buf parameter.  If keep_buf is false, the results
    1652              :  * are the same as for the tuple-not-found case.  If keep_buf is true,
    1653              :  * then tuple->t_data and *userbuf are returned as for the success case,
    1654              :  * and again the caller must unpin the buffer; but false is returned.
    1655              :  *
    1656              :  * heap_fetch does not follow HOT chains: only the exact TID requested will
    1657              :  * be fetched.
    1658              :  *
    1659              :  * It is somewhat inconsistent that we ereport() on invalid block number but
    1660              :  * return false on invalid item number.  There are a couple of reasons though.
    1661              :  * One is that the caller can relatively easily check the block number for
    1662              :  * validity, but cannot check the item number without reading the page
    1663              :  * himself.  Another is that when we are following a t_ctid link, we can be
    1664              :  * reasonably confident that the page number is valid (since VACUUM shouldn't
    1665              :  * truncate off the destination page without having killed the referencing
    1666              :  * tuple first), but the item number might well not be good.
    1667              :  */
bool
heap_fetch(Relation relation,
           Snapshot snapshot,
           HeapTuple tuple,
           Buffer *userbuf,
           bool keep_buf)
{
    ItemPointer tid = &(tuple->t_self);
    ItemId      lp;
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    bool        valid;

    /*
     * Fetch and pin the appropriate page of the relation.
     */
    buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));

    /*
     * Need share lock on buffer to examine tuple commit status.
     */
    LockBuffer(buffer, BUFFER_LOCK_SHARE);
    page = BufferGetPage(buffer);

    /*
     * We'd better check for out-of-range offnum in case of VACUUM since the
     * TID was obtained.
     */
    offnum = ItemPointerGetOffsetNumber(tid);
    if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
    {
        /* Bogus item number: report not-found per the function contract. */
        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
        ReleaseBuffer(buffer);
        *userbuf = InvalidBuffer;
        tuple->t_data = NULL;
        return false;
    }

    /*
     * get the item line pointer corresponding to the requested tid
     */
    lp = PageGetItemId(page, offnum);

    /*
     * Must check for deleted tuple.
     */
    if (!ItemIdIsNormal(lp))
    {
        /* Unused, dead, or redirect line pointer: treat as not found. */
        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
        ReleaseBuffer(buffer);
        *userbuf = InvalidBuffer;
        tuple->t_data = NULL;
        return false;
    }

    /*
     * fill in *tuple fields
     */
    tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
    tuple->t_len = ItemIdGetLength(lp);
    tuple->t_tableOid = RelationGetRelid(relation);

    /*
     * check tuple visibility, then release lock
     */
    valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);

    /* For serializable transactions, record an SSI read of this tuple. */
    if (valid)
        PredicateLockTID(relation, &(tuple->t_self), snapshot,
                         HeapTupleHeaderGetXmin(tuple->t_data));

    /* Detect rw-conflicts out, whether or not the tuple was visible. */
    HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);

    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

    if (valid)
    {
        /*
         * All checks passed, so return the tuple as valid. Caller is now
         * responsible for releasing the buffer.
         */
        *userbuf = buffer;

        return true;
    }

    /* Tuple failed time qual, but maybe caller wants to see it anyway. */
    if (keep_buf)
        *userbuf = buffer;
    else
    {
        ReleaseBuffer(buffer);
        *userbuf = InvalidBuffer;
        tuple->t_data = NULL;
    }

    return false;
}
    1767              : 
    1768              : /*
    1769              :  *  heap_hot_search_buffer  - search HOT chain for tuple satisfying snapshot
    1770              :  *
    1771              :  * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
    1772              :  * of a HOT chain), and buffer is the buffer holding this tuple.  We search
    1773              :  * for the first chain member satisfying the given snapshot.  If one is
    1774              :  * found, we update *tid to reference that tuple's offset number, and
    1775              :  * return true.  If no match, return false without modifying *tid.
    1776              :  *
    1777              :  * heapTuple is a caller-supplied buffer.  When a match is found, we return
    1778              :  * the tuple here, in addition to updating *tid.  If no match is found, the
    1779              :  * contents of this buffer on return are undefined.
    1780              :  *
    1781              :  * If all_dead is not NULL, we check non-visible tuples to see if they are
    1782              :  * globally dead; *all_dead is set true if all members of the HOT chain
    1783              :  * are vacuumable, false if not.
    1784              :  *
    1785              :  * Unlike heap_fetch, the caller must already have pin and (at least) share
    1786              :  * lock on the buffer; it is still pinned/locked at exit.
    1787              :  */
bool
heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
                       Snapshot snapshot, HeapTuple heapTuple,
                       bool *all_dead, bool first_call)
{
    Page        page = BufferGetPage(buffer);
    TransactionId prev_xmax = InvalidTransactionId;
    BlockNumber blkno;
    OffsetNumber offnum;
    bool        at_chain_start;    /* true only while at the chain's root item */
    bool        valid;
    bool        skip;              /* true => skip the tuple returned last call */
    GlobalVisState *vistest = NULL; /* lazily initialized for all_dead checks */

    /* If this is not the first call, previous call returned a (live!) tuple */
    if (all_dead)
        *all_dead = first_call;

    blkno = ItemPointerGetBlockNumber(tid);
    offnum = ItemPointerGetOffsetNumber(tid);
    at_chain_start = first_call;
    skip = !first_call;

    /* XXX: we should assert that a snapshot is pushed or registered */
    Assert(TransactionIdIsValid(RecentXmin));
    Assert(BufferGetBlockNumber(buffer) == blkno);

    /* Scan through possible multiple members of HOT-chain */
    for (;;)
    {
        ItemId      lp;

        /* check for bogus TID */
        if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
            break;

        lp = PageGetItemId(page, offnum);

        /* check for unused, dead, or redirected items */
        if (!ItemIdIsNormal(lp))
        {
            /* We should only see a redirect at start of chain */
            if (ItemIdIsRedirected(lp) && at_chain_start)
            {
                /* Follow the redirect */
                offnum = ItemIdGetRedirect(lp);
                at_chain_start = false;
                continue;
            }
            /* else must be end of chain */
            break;
        }

        /*
         * Update heapTuple to point to the element of the HOT chain we're
         * currently investigating. Having t_self set correctly is important
         * because the SSI checks and the *Satisfies routine for historical
         * MVCC snapshots need the correct tid to decide about the visibility.
         */
        heapTuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
        heapTuple->t_len = ItemIdGetLength(lp);
        heapTuple->t_tableOid = RelationGetRelid(relation);
        ItemPointerSet(&heapTuple->t_self, blkno, offnum);

        /*
         * Shouldn't see a HEAP_ONLY tuple at chain start.
         */
        if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
            break;

        /*
         * The xmin should match the previous xmax value, else chain is
         * broken.
         */
        if (TransactionIdIsValid(prev_xmax) &&
            !TransactionIdEquals(prev_xmax,
                                 HeapTupleHeaderGetXmin(heapTuple->t_data)))
            break;

        /*
         * When first_call is true (and thus, skip is initially false) we'll
         * return the first tuple we find.  But on later passes, heapTuple
         * will initially be pointing to the tuple we returned last time.
         * Returning it again would be incorrect (and would loop forever), so
         * we skip it and return the next match we find.
         */
        if (!skip)
        {
            /* If it's visible per the snapshot, we must return it */
            valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
            HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
                                                buffer, snapshot);

            if (valid)
            {
                /* Report the matching member's offset back through *tid. */
                ItemPointerSetOffsetNumber(tid, offnum);
                PredicateLockTID(relation, &heapTuple->t_self, snapshot,
                                 HeapTupleHeaderGetXmin(heapTuple->t_data));
                if (all_dead)
                    *all_dead = false;
                return true;
            }
        }
        skip = false;

        /*
         * If we can't see it, maybe no one else can either.  At caller
         * request, check whether all chain members are dead to all
         * transactions.
         *
         * Note: if you change the criterion here for what is "dead", fix the
         * planner's get_actual_variable_range() function to match.
         */
        if (all_dead && *all_dead)
        {
            if (!vistest)
                vistest = GlobalVisTestFor(relation);

            if (!HeapTupleIsSurelyDead(heapTuple, vistest))
                *all_dead = false;
        }

        /*
         * Check to see if HOT chain continues past this tuple; if so fetch
         * the next offnum and loop around.
         */
        if (HeapTupleIsHotUpdated(heapTuple))
        {
            /* HOT updates never leave the page, so t_ctid stays on blkno. */
            Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
                   blkno);
            offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
            at_chain_start = false;
            prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
        }
        else
            break;              /* end of chain */
    }

    /* No visible chain member found; *tid is unchanged. */
    return false;
}
    1928              : 
    1929              : /*
    1930              :  *  heap_get_latest_tid -  get the latest tid of a specified tuple
    1931              :  *
    1932              :  * Actually, this gets the latest version that is visible according to the
    1933              :  * scan's snapshot.  Create a scan using SnapshotDirty to get the very latest,
    1934              :  * possibly uncommitted version.
    1935              :  *
    1936              :  * *tid is both an input and an output parameter: it is updated to
    1937              :  * show the latest version of the row.  Note that it will not be changed
    1938              :  * if no version of the row passes the snapshot test.
    1939              :  */
void
heap_get_latest_tid(TableScanDesc sscan,
                    ItemPointer tid)
{
    Relation    relation = sscan->rs_rd;
    Snapshot    snapshot = sscan->rs_snapshot;
    ItemPointerData ctid;
    TransactionId priorXmax;    /* xmax of the previous chain member, for
                                 * validating the next link */

    /*
     * table_tuple_get_latest_tid() verified that the passed in tid is valid.
     * Assume that t_ctid links are valid however - there shouldn't be invalid
     * ones in the table.
     */
    Assert(ItemPointerIsValid(tid));

    /*
     * Loop to chase down t_ctid links.  At top of loop, ctid is the tuple we
     * need to examine, and *tid is the TID we will return if ctid turns out
     * to be bogus.
     *
     * Note that we will loop until we reach the end of the t_ctid chain.
     * Depending on the snapshot passed, there might be at most one visible
     * version of the row, but we don't try to optimize for that.
     */
    ctid = *tid;
    priorXmax = InvalidTransactionId;   /* cannot check first XMIN */
    for (;;)
    {
        Buffer      buffer;
        Page        page;
        OffsetNumber offnum;
        ItemId      lp;
        HeapTupleData tp;
        bool        valid;

        /*
         * Read, pin, and lock the page.
         */
        buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
        LockBuffer(buffer, BUFFER_LOCK_SHARE);
        page = BufferGetPage(buffer);

        /*
         * Check for bogus item number.  This is not treated as an error
         * condition because it can happen while following a t_ctid link. We
         * just assume that the prior tid is OK and return it unchanged.
         */
        offnum = ItemPointerGetOffsetNumber(&ctid);
        if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
        {
            UnlockReleaseBuffer(buffer);
            break;
        }
        lp = PageGetItemId(page, offnum);
        if (!ItemIdIsNormal(lp))
        {
            /* Likewise: deleted or redirected slot ends the chase. */
            UnlockReleaseBuffer(buffer);
            break;
        }

        /* OK to access the tuple */
        tp.t_self = ctid;
        tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
        tp.t_len = ItemIdGetLength(lp);
        tp.t_tableOid = RelationGetRelid(relation);

        /*
         * After following a t_ctid link, we might arrive at an unrelated
         * tuple.  Check for XMIN match.
         */
        if (TransactionIdIsValid(priorXmax) &&
            !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
        {
            UnlockReleaseBuffer(buffer);
            break;
        }

        /*
         * Check tuple visibility; if visible, set it as the new result
         * candidate.
         */
        valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
        HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
        if (valid)
            *tid = ctid;

        /*
         * If there's a valid t_ctid link, follow it, else we're done.
         */
        if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
            HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
            HeapTupleHeaderIndicatesMovedPartitions(tp.t_data) ||
            ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
        {
            /* Chain ends here: xmax invalid/locker-only, tuple moved to
             * another partition, or t_ctid points back at itself. */
            UnlockReleaseBuffer(buffer);
            break;
        }

        /* Advance to the successor version named by t_ctid. */
        ctid = tp.t_data->t_ctid;
        priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
        UnlockReleaseBuffer(buffer);
    }                           /* end of loop */
}
    2044              : 
    2045              : 
    2046              : /*
    2047              :  * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
    2048              :  *
    2049              :  * This is called after we have waited for the XMAX transaction to terminate.
    2050              :  * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
    2051              :  * be set on exit.  If the transaction committed, we set the XMAX_COMMITTED
    2052              :  * hint bit if possible --- but beware that that may not yet be possible,
    2053              :  * if the transaction committed asynchronously.
    2054              :  *
    2055              :  * Note that if the transaction was a locker only, we set HEAP_XMAX_INVALID
    2056              :  * even if it commits.
    2057              :  *
    2058              :  * Hence callers should look only at XMAX_INVALID.
    2059              :  *
    2060              :  * Note this is not allowed for tuples whose xmax is a multixact.
    2061              :  */
    2062              : static void
    2063          229 : UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
    2064              : {
    2065              :     Assert(TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple), xid));
    2066              :     Assert(!(tuple->t_infomask & HEAP_XMAX_IS_MULTI));
    2067              : 
    2068          229 :     if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
    2069              :     {
    2070          406 :         if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
    2071          178 :             TransactionIdDidCommit(xid))
    2072          151 :             HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
    2073              :                                  xid);
    2074              :         else
    2075           77 :             HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
    2076              :                                  InvalidTransactionId);
    2077              :     }
    2078          229 : }
    2079              : 
    2080              : 
    2081              : /*
    2082              :  * GetBulkInsertState - prepare status object for a bulk insert
    2083              :  */
    2084              : BulkInsertState
    2085         3476 : GetBulkInsertState(void)
    2086              : {
    2087              :     BulkInsertState bistate;
    2088              : 
    2089         3476 :     bistate = (BulkInsertState) palloc_object(BulkInsertStateData);
    2090         3476 :     bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
    2091         3476 :     bistate->current_buf = InvalidBuffer;
    2092         3476 :     bistate->next_free = InvalidBlockNumber;
    2093         3476 :     bistate->last_free = InvalidBlockNumber;
    2094         3476 :     bistate->already_extended_by = 0;
    2095         3476 :     return bistate;
    2096              : }
    2097              : 
    2098              : /*
    2099              :  * FreeBulkInsertState - clean up after finishing a bulk insert
    2100              :  */
    2101              : void
    2102         3255 : FreeBulkInsertState(BulkInsertState bistate)
    2103              : {
    2104         3255 :     if (bistate->current_buf != InvalidBuffer)
    2105         2563 :         ReleaseBuffer(bistate->current_buf);
    2106         3255 :     FreeAccessStrategy(bistate->strategy);
    2107         3255 :     pfree(bistate);
    2108         3255 : }
    2109              : 
    2110              : /*
    2111              :  * ReleaseBulkInsertStatePin - release a buffer currently held in bistate
    2112              :  */
    2113              : void
    2114        90779 : ReleaseBulkInsertStatePin(BulkInsertState bistate)
    2115              : {
    2116        90779 :     if (bistate->current_buf != InvalidBuffer)
    2117        40028 :         ReleaseBuffer(bistate->current_buf);
    2118        90779 :     bistate->current_buf = InvalidBuffer;
    2119              : 
    2120              :     /*
    2121              :      * Despite the name, we also reset bulk relation extension state.
    2122              :      * Otherwise we can end up erroring out due to looking for free space in
    2123              :      * ->next_free of one partition, even though ->next_free was set when
    2124              :      * extending another partition. It could obviously also be bad for
    2125              :      * efficiency to look at existing blocks at offsets from another
    2126              :      * partition, even if we don't error out.
    2127              :      */
    2128        90779 :     bistate->next_free = InvalidBlockNumber;
    2129        90779 :     bistate->last_free = InvalidBlockNumber;
    2130        90779 : }
    2131              : 
    2132              : 
    2133              : /*
    2134              :  *  heap_insert     - insert tuple into a heap
    2135              :  *
    2136              :  * The new tuple is stamped with current transaction ID and the specified
    2137              :  * command ID.
    2138              :  *
    2139              :  * See table_tuple_insert for comments about most of the input flags, except
    2140              :  * that this routine directly takes a tuple rather than a slot.
    2141              :  *
    2142              :  * There's corresponding HEAP_INSERT_ options to all the TABLE_INSERT_
    2143              :  * options, and there additionally is HEAP_INSERT_SPECULATIVE which is used to
    2144              :  * implement table_tuple_insert_speculative().
    2145              :  *
    2146              :  * On return the header fields of *tup are updated to match the stored tuple;
    2147              :  * in particular tup->t_self receives the actual TID where the tuple was
    2148              :  * stored.  But note that any toasting of fields within the tuple data is NOT
    2149              :  * reflected into *tup.
    2150              :  */
void
heap_insert(Relation relation, HeapTuple tup, CommandId cid,
            int options, BulkInsertState bistate)
{
    TransactionId xid = GetCurrentTransactionId(); /* tuple is stamped with this XID */
    HeapTuple   heaptup;
    Buffer      buffer;
    Buffer      vmbuffer = InvalidBuffer;   /* VM page pin, if the heap page was all-visible */
    bool        all_visible_cleared = false;

    /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
    Assert(HeapTupleHeaderGetNatts(tup->t_data) <=
           RelationGetNumberOfAttributes(relation));

    AssertHasSnapshotForToast(relation);

    /*
     * Fill in tuple header fields and toast the tuple if necessary.
     *
     * Note: below this point, heaptup is the data we actually intend to store
     * into the relation; tup is the caller's original untoasted data.
     */
    heaptup = heap_prepare_insert(relation, tup, xid, cid, options);

    /*
     * Find buffer to insert this tuple into.  If the page is all visible,
     * this will also pin the requisite visibility map page.
     */
    buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
                                       InvalidBuffer, options, bistate,
                                       &vmbuffer, NULL,
                                       0);

    /*
     * We're about to do the actual insert -- but check for conflict first, to
     * avoid possibly having to roll back work we've just done.
     *
     * This is safe without a recheck as long as there is no possibility of
     * another process scanning the page between this check and the insert
     * being visible to the scan (i.e., an exclusive buffer content lock is
     * continuously held from this point until the tuple insert is visible).
     *
     * For a heap insert, we only need to check for table-level SSI locks. Our
     * new tuple can't possibly conflict with existing tuple locks, and heap
     * page locks are only consolidated versions of tuple locks; they do not
     * lock "gaps" as index page locks do.  So we don't need to specify a
     * buffer when making the call, which makes for a faster check.
     */
    CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);

    /* NO EREPORT(ERROR) from here till changes are logged */
    START_CRIT_SECTION();

    /* Physically place the tuple on the page; sets heaptup->t_self. */
    RelationPutHeapTuple(relation, buffer, heaptup,
                         (options & HEAP_INSERT_SPECULATIVE) != 0);

    /*
     * Inserting a (not yet visible) tuple invalidates the page's all-visible
     * status; clear both the page bit and the visibility map bits.
     */
    if (PageIsAllVisible(BufferGetPage(buffer)))
    {
        all_visible_cleared = true;
        PageClearAllVisible(BufferGetPage(buffer));
        visibilitymap_clear(relation,
                            ItemPointerGetBlockNumber(&(heaptup->t_self)),
                            vmbuffer, VISIBILITYMAP_VALID_BITS);
    }

    /*
     * XXX Should we set PageSetPrunable on this page ?
     *
     * The inserting transaction may eventually abort thus making this tuple
     * DEAD and hence available for pruning. Though we don't want to optimize
     * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
     * aborted tuple will never be pruned until next vacuum is triggered.
     *
     * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
     */

    MarkBufferDirty(buffer);

    /* XLOG stuff */
    if (RelationNeedsWAL(relation))
    {
        xl_heap_insert xlrec;
        xl_heap_header xlhdr;
        XLogRecPtr  recptr;
        Page        page = BufferGetPage(buffer);
        uint8       info = XLOG_HEAP_INSERT;
        int         bufflags = 0;

        /*
         * If this is a catalog, we need to transmit combo CIDs to properly
         * decode, so log that as well.
         */
        if (RelationIsAccessibleInLogicalDecoding(relation))
            log_heap_new_cid(relation, heaptup);

        /*
         * If this is the single and first tuple on page, we can reinit the
         * page instead of restoring the whole thing.  Set flag, and hide
         * buffer references from XLogInsert.
         */
        if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
            PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
        {
            info |= XLOG_HEAP_INIT_PAGE;
            bufflags |= REGBUF_WILL_INIT;
        }

        xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
        xlrec.flags = 0;
        if (all_visible_cleared)
            xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
        if (options & HEAP_INSERT_SPECULATIVE)
            xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
        Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));

        /*
         * For logical decoding, we need the tuple even if we're doing a full
         * page write, so make sure it's included even if we take a full-page
         * image. (XXX We could alternatively store a pointer into the FPW).
         */
        if (RelationIsLogicallyLogged(relation) &&
            !(options & HEAP_INSERT_NO_LOGICAL))
        {
            xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
            bufflags |= REGBUF_KEEP_DATA;

            if (IsToastRelation(relation))
                xlrec.flags |= XLH_INSERT_ON_TOAST_RELATION;
        }

        XLogBeginInsert();
        XLogRegisterData(&xlrec, SizeOfHeapInsert);

        /* Tuple header fields travel separately from the tuple body. */
        xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
        xlhdr.t_infomask = heaptup->t_data->t_infomask;
        xlhdr.t_hoff = heaptup->t_data->t_hoff;

        /*
         * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
         * write the whole page to the xlog, we don't need to store
         * xl_heap_header in the xlog.
         */
        XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
        XLogRegisterBufData(0, &xlhdr, SizeOfHeapHeader);
        /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
        XLogRegisterBufData(0,
                            (char *) heaptup->t_data + SizeofHeapTupleHeader,
                            heaptup->t_len - SizeofHeapTupleHeader);

        /* filtering by origin on a row level is much more efficient */
        XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);

        recptr = XLogInsert(RM_HEAP_ID, info);

        PageSetLSN(page, recptr);
    }

    END_CRIT_SECTION();

    /* Done with the heap page and, if pinned, the visibility map page. */
    UnlockReleaseBuffer(buffer);
    if (vmbuffer != InvalidBuffer)
        ReleaseBuffer(vmbuffer);

    /*
     * If tuple is cacheable, mark it for invalidation from the caches in case
     * we abort.  Note it is OK to do this after releasing the buffer, because
     * the heaptup data structure is all in local memory, not in the shared
     * buffer.
     */
    CacheInvalidateHeapTuple(relation, heaptup, NULL);

    /* Note: speculative insertions are counted too, even if aborted later */
    pgstat_count_heap_insert(relation, 1);

    /*
     * If heaptup is a private copy, release it.  Don't forget to copy t_self
     * back to the caller's image, too.
     */
    if (heaptup != tup)
    {
        tup->t_self = heaptup->t_self;
        heap_freetuple(heaptup);
    }
}
    2335              : 
    2336              : /*
    2337              :  * Subroutine for heap_insert(). Prepares a tuple for insertion. This sets the
    2338              :  * tuple header fields and toasts the tuple if necessary.  Returns a toasted
    2339              :  * version of the tuple if it was toasted, or the original tuple if not. Note
    2340              :  * that in any case, the header fields are also set in the original tuple.
    2341              :  */
    2342              : static HeapTuple
    2343     11879116 : heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
    2344              :                     CommandId cid, int options)
    2345              : {
    2346              :     /*
    2347              :      * To allow parallel inserts, we need to ensure that they are safe to be
    2348              :      * performed in workers. We have the infrastructure to allow parallel
    2349              :      * inserts in general except for the cases where inserts generate a new
    2350              :      * CommandId (eg. inserts into a table having a foreign key column).
    2351              :      */
    2352     11879116 :     if (IsParallelWorker())
    2353            0 :         ereport(ERROR,
    2354              :                 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
    2355              :                  errmsg("cannot insert tuples in a parallel worker")));
    2356              : 
    2357     11879116 :     tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
    2358     11879116 :     tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
    2359     11879116 :     tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
    2360     11879116 :     HeapTupleHeaderSetXmin(tup->t_data, xid);
    2361     11879116 :     if (options & HEAP_INSERT_FROZEN)
    2362       102651 :         HeapTupleHeaderSetXminFrozen(tup->t_data);
    2363              : 
    2364     11879116 :     HeapTupleHeaderSetCmin(tup->t_data, cid);
    2365     11879116 :     HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
    2366     11879116 :     tup->t_tableOid = RelationGetRelid(relation);
    2367              : 
    2368              :     /*
    2369              :      * If the new tuple is too big for storage or contains already toasted
    2370              :      * out-of-line attributes from some other relation, invoke the toaster.
    2371              :      */
    2372     11879116 :     if (relation->rd_rel->relkind != RELKIND_RELATION &&
    2373        33825 :         relation->rd_rel->relkind != RELKIND_MATVIEW)
    2374              :     {
    2375              :         /* toast table entries should never be recursively toasted */
    2376              :         Assert(!HeapTupleHasExternal(tup));
    2377        33764 :         return tup;
    2378              :     }
    2379     11845352 :     else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
    2380        21167 :         return heap_toast_insert_or_update(relation, tup, NULL, options);
    2381              :     else
    2382     11824185 :         return tup;
    2383              : }
    2384              : 
    2385              : /*
    2386              :  * Helper for heap_multi_insert() that computes the number of entire pages
    2387              :  * that inserting the remaining heaptuples requires. Used to determine how
    2388              :  * much the relation needs to be extended by.
    2389              :  */
    2390              : static int
    2391       464720 : heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace)
    2392              : {
    2393       464720 :     size_t      page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
    2394       464720 :     int         npages = 1;
    2395              : 
    2396      2910530 :     for (int i = done; i < ntuples; i++)
    2397              :     {
    2398      2445810 :         size_t      tup_sz = sizeof(ItemIdData) + MAXALIGN(heaptuples[i]->t_len);
    2399              : 
    2400      2445810 :         if (page_avail < tup_sz)
    2401              :         {
    2402        17503 :             npages++;
    2403        17503 :             page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
    2404              :         }
    2405      2445810 :         page_avail -= tup_sz;
    2406              :     }
    2407              : 
    2408       464720 :     return npages;
    2409              : }
    2410              : 
    2411              : /*
    2412              :  *  heap_multi_insert   - insert multiple tuples into a heap
    2413              :  *
    2414              :  * This is like heap_insert(), but inserts multiple tuples in one operation.
    2415              :  * That's faster than calling heap_insert() in a loop, because when multiple
    2416              :  * tuples can be inserted on a single page, we can write just a single WAL
    2417              :  * record covering all of them, and only need to lock/unlock the page once.
    2418              :  *
    2419              :  * Note: this leaks memory into the current memory context. You can create a
    2420              :  * temporary context before calling this, if that's a problem.
    2421              :  */
    2422              : void
    2423       456790 : heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
    2424              :                   CommandId cid, int options, BulkInsertState bistate)
    2425              : {
    2426       456790 :     TransactionId xid = GetCurrentTransactionId();
    2427              :     HeapTuple  *heaptuples;
    2428              :     int         i;
    2429              :     int         ndone;
    2430              :     PGAlignedBlock scratch;
    2431              :     Page        page;
    2432       456790 :     Buffer      vmbuffer = InvalidBuffer;
    2433              :     bool        needwal;
    2434              :     Size        saveFreeSpace;
    2435       456790 :     bool        need_tuple_data = RelationIsLogicallyLogged(relation);
    2436       456790 :     bool        need_cids = RelationIsAccessibleInLogicalDecoding(relation);
    2437       456790 :     bool        starting_with_empty_page = false;
    2438       456790 :     int         npages = 0;
    2439       456790 :     int         npages_used = 0;
    2440              : 
    2441              :     /* currently not needed (thus unsupported) for heap_multi_insert() */
    2442              :     Assert(!(options & HEAP_INSERT_NO_LOGICAL));
    2443              : 
    2444       456790 :     AssertHasSnapshotForToast(relation);
    2445              : 
    2446       456790 :     needwal = RelationNeedsWAL(relation);
    2447       456790 :     saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
    2448              :                                                    HEAP_DEFAULT_FILLFACTOR);
    2449              : 
    2450              :     /* Toast and set header data in all the slots */
    2451       456790 :     heaptuples = palloc(ntuples * sizeof(HeapTuple));
    2452      2227020 :     for (i = 0; i < ntuples; i++)
    2453              :     {
    2454              :         HeapTuple   tuple;
    2455              : 
    2456      1770230 :         tuple = ExecFetchSlotHeapTuple(slots[i], true, NULL);
    2457      1770230 :         slots[i]->tts_tableOid = RelationGetRelid(relation);
    2458      1770230 :         tuple->t_tableOid = slots[i]->tts_tableOid;
    2459      1770230 :         heaptuples[i] = heap_prepare_insert(relation, tuple, xid, cid,
    2460              :                                             options);
    2461              :     }
    2462              : 
    2463              :     /*
    2464              :      * We're about to do the actual inserts -- but check for conflict first,
    2465              :      * to minimize the possibility of having to roll back work we've just
    2466              :      * done.
    2467              :      *
    2468              :      * A check here does not definitively prevent a serialization anomaly;
    2469              :      * that check MUST be done at least past the point of acquiring an
    2470              :      * exclusive buffer content lock on every buffer that will be affected,
    2471              :      * and MAY be done after all inserts are reflected in the buffers and
    2472              :      * those locks are released; otherwise there is a race condition.  Since
    2473              :      * multiple buffers can be locked and unlocked in the loop below, and it
    2474              :      * would not be feasible to identify and lock all of those buffers before
    2475              :      * the loop, we must do a final check at the end.
    2476              :      *
    2477              :      * The check here could be omitted with no loss of correctness; it is
    2478              :      * present strictly as an optimization.
    2479              :      *
    2480              :      * For heap inserts, we only need to check for table-level SSI locks. Our
    2481              :      * new tuples can't possibly conflict with existing tuple locks, and heap
    2482              :      * page locks are only consolidated versions of tuple locks; they do not
    2483              :      * lock "gaps" as index page locks do.  So we don't need to specify a
    2484              :      * buffer when making the call, which makes for a faster check.
    2485              :      */
    2486       456790 :     CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
    2487              : 
    2488       456790 :     ndone = 0;
    2489       930751 :     while (ndone < ntuples)
    2490              :     {
    2491              :         Buffer      buffer;
    2492       473961 :         bool        all_visible_cleared = false;
    2493       473961 :         bool        all_frozen_set = false;
    2494              :         int         nthispage;
    2495              : 
    2496       473961 :         CHECK_FOR_INTERRUPTS();
    2497              : 
    2498              :         /*
    2499              :          * Compute number of pages needed to fit the to-be-inserted tuples in
    2500              :          * the worst case.  This will be used to determine how much to extend
    2501              :          * the relation by in RelationGetBufferForTuple(), if needed.  If we
    2502              :          * filled a prior page from scratch, we can just update our last
    2503              :          * computation, but if we started with a partially filled page,
    2504              :          * recompute from scratch, the number of potentially required pages
    2505              :          * can vary due to tuples needing to fit onto the page, page headers
    2506              :          * etc.
    2507              :          */
    2508       473961 :         if (ndone == 0 || !starting_with_empty_page)
    2509              :         {
    2510       464720 :             npages = heap_multi_insert_pages(heaptuples, ndone, ntuples,
    2511              :                                              saveFreeSpace);
    2512       464720 :             npages_used = 0;
    2513              :         }
    2514              :         else
    2515         9241 :             npages_used++;
    2516              : 
    2517              :         /*
    2518              :          * Find buffer where at least the next tuple will fit.  If the page is
    2519              :          * all-visible, this will also pin the requisite visibility map page.
    2520              :          *
    2521              :          * Also pin visibility map page if COPY FREEZE inserts tuples into an
    2522              :          * empty page. See all_frozen_set below.
    2523              :          */
    2524       473961 :         buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
    2525              :                                            InvalidBuffer, options, bistate,
    2526              :                                            &vmbuffer, NULL,
    2527              :                                            npages - npages_used);
    2528       473961 :         page = BufferGetPage(buffer);
    2529              : 
    2530       473961 :         starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0;
    2531              : 
    2532       473961 :         if (starting_with_empty_page && (options & HEAP_INSERT_FROZEN))
    2533              :         {
    2534         1665 :             all_frozen_set = true;
    2535              :             /* Lock the vmbuffer before entering the critical section */
    2536         1665 :             LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE);
    2537              :         }
    2538              : 
    2539              :         /* NO EREPORT(ERROR) from here till changes are logged */
    2540       473961 :         START_CRIT_SECTION();
    2541              : 
    2542              :         /*
    2543              :          * RelationGetBufferForTuple has ensured that the first tuple fits.
    2544              :          * Put that on the page, and then as many other tuples as fit.
    2545              :          */
    2546       473961 :         RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
    2547              : 
    2548              :         /*
    2549              :          * For logical decoding we need combo CIDs to properly decode the
    2550              :          * catalog.
    2551              :          */
    2552       473961 :         if (needwal && need_cids)
    2553         5139 :             log_heap_new_cid(relation, heaptuples[ndone]);
    2554              : 
    2555      1770230 :         for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
    2556              :         {
    2557      1313440 :             HeapTuple   heaptup = heaptuples[ndone + nthispage];
    2558              : 
    2559      1313440 :             if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace)
    2560        17171 :                 break;
    2561              : 
    2562      1296269 :             RelationPutHeapTuple(relation, buffer, heaptup, false);
    2563              : 
    2564              :             /*
    2565              :              * For logical decoding we need combo CIDs to properly decode the
    2566              :              * catalog.
    2567              :              */
    2568      1296269 :             if (needwal && need_cids)
    2569         4802 :                 log_heap_new_cid(relation, heaptup);
    2570              :         }
    2571              : 
    2572              :         /*
    2573              :          * If the page is all visible, need to clear that, unless we're only
    2574              :          * going to add further frozen rows to it.
    2575              :          *
    2576              :          * If we're only adding already frozen rows to a previously empty
    2577              :          * page, mark it as all-frozen and update the visibility map. We're
    2578              :          * already holding a pin on the vmbuffer.
    2579              :          */
    2580       473961 :         if (PageIsAllVisible(page) && !(options & HEAP_INSERT_FROZEN))
    2581              :         {
    2582         3763 :             all_visible_cleared = true;
    2583         3763 :             PageClearAllVisible(page);
    2584         3763 :             visibilitymap_clear(relation,
    2585              :                                 BufferGetBlockNumber(buffer),
    2586              :                                 vmbuffer, VISIBILITYMAP_VALID_BITS);
    2587              :         }
    2588       470198 :         else if (all_frozen_set)
    2589              :         {
    2590         1665 :             PageSetAllVisible(page);
    2591         1665 :             PageClearPrunable(page);
    2592         1665 :             visibilitymap_set_vmbits(BufferGetBlockNumber(buffer),
    2593              :                                      vmbuffer,
    2594              :                                      VISIBILITYMAP_ALL_VISIBLE |
    2595              :                                      VISIBILITYMAP_ALL_FROZEN,
    2596              :                                      relation->rd_locator);
    2597              :         }
    2598              : 
    2599              :         /*
    2600              :          * XXX Should we set PageSetPrunable on this page ? See heap_insert()
    2601              :          */
    2602              : 
    2603       473961 :         MarkBufferDirty(buffer);
    2604              : 
    2605              :         /* XLOG stuff */
    2606       473961 :         if (needwal)
    2607              :         {
    2608              :             XLogRecPtr  recptr;
    2609              :             xl_heap_multi_insert *xlrec;
    2610       470061 :             uint8       info = XLOG_HEAP2_MULTI_INSERT;
    2611              :             char       *tupledata;
    2612              :             int         totaldatalen;
    2613       470061 :             char       *scratchptr = scratch.data;
    2614              :             bool        init;
    2615       470061 :             int         bufflags = 0;
    2616              : 
    2617              :             /*
    2618              :              * If the page was previously empty, we can reinit the page
    2619              :              * instead of restoring the whole thing.
    2620              :              */
    2621       470061 :             init = starting_with_empty_page;
    2622              : 
    2623              :             /* allocate xl_heap_multi_insert struct from the scratch area */
    2624       470061 :             xlrec = (xl_heap_multi_insert *) scratchptr;
    2625       470061 :             scratchptr += SizeOfHeapMultiInsert;
    2626              : 
    2627              :             /*
    2628              :              * Allocate offsets array. Unless we're reinitializing the page,
    2629              :              * in that case the tuples are stored in order starting at
    2630              :              * FirstOffsetNumber and we don't need to store the offsets
    2631              :              * explicitly.
    2632              :              */
    2633       470061 :             if (!init)
    2634       454235 :                 scratchptr += nthispage * sizeof(OffsetNumber);
    2635              : 
    2636              :             /* the rest of the scratch space is used for tuple data */
    2637       470061 :             tupledata = scratchptr;
    2638              : 
    2639              :             /* check that the mutually exclusive flags are not both set */
    2640              :             Assert(!(all_visible_cleared && all_frozen_set));
    2641              : 
    2642       470061 :             xlrec->flags = 0;
    2643       470061 :             if (all_visible_cleared)
    2644         3763 :                 xlrec->flags = XLH_INSERT_ALL_VISIBLE_CLEARED;
    2645              : 
    2646              :             /*
    2647              :              * We don't have to worry about including a conflict xid in the
    2648              :              * WAL record, as HEAP_INSERT_FROZEN intentionally violates
    2649              :              * visibility rules.
    2650              :              */
    2651       470061 :             if (all_frozen_set)
    2652           17 :                 xlrec->flags = XLH_INSERT_ALL_FROZEN_SET;
    2653              : 
    2654       470061 :             xlrec->ntuples = nthispage;
    2655              : 
    2656              :             /*
    2657              :              * Write out an xl_multi_insert_tuple and the tuple data itself
    2658              :              * for each tuple.
    2659              :              */
    2660      2033699 :             for (i = 0; i < nthispage; i++)
    2661              :             {
    2662      1563638 :                 HeapTuple   heaptup = heaptuples[ndone + i];
    2663              :                 xl_multi_insert_tuple *tuphdr;
    2664              :                 int         datalen;
    2665              : 
    2666      1563638 :                 if (!init)
    2667       934013 :                     xlrec->offsets[i] = ItemPointerGetOffsetNumber(&heaptup->t_self);
    2668              :                 /* xl_multi_insert_tuple needs two-byte alignment. */
    2669      1563638 :                 tuphdr = (xl_multi_insert_tuple *) SHORTALIGN(scratchptr);
    2670      1563638 :                 scratchptr = ((char *) tuphdr) + SizeOfMultiInsertTuple;
    2671              : 
    2672      1563638 :                 tuphdr->t_infomask2 = heaptup->t_data->t_infomask2;
    2673      1563638 :                 tuphdr->t_infomask = heaptup->t_data->t_infomask;
    2674      1563638 :                 tuphdr->t_hoff = heaptup->t_data->t_hoff;
    2675              : 
    2676              :                 /* write bitmap [+ padding] [+ oid] + data */
    2677      1563638 :                 datalen = heaptup->t_len - SizeofHeapTupleHeader;
    2678      1563638 :                 memcpy(scratchptr,
    2679      1563638 :                        (char *) heaptup->t_data + SizeofHeapTupleHeader,
    2680              :                        datalen);
    2681      1563638 :                 tuphdr->datalen = datalen;
    2682      1563638 :                 scratchptr += datalen;
    2683              :             }
    2684       470061 :             totaldatalen = scratchptr - tupledata;
    2685              :             Assert((scratchptr - scratch.data) < BLCKSZ);
    2686              : 
    2687       470061 :             if (need_tuple_data)
    2688           72 :                 xlrec->flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
    2689              : 
    2690              :             /*
    2691              :              * Signal that this is the last xl_heap_multi_insert record
    2692              :              * emitted by this call to heap_multi_insert(). Needed for logical
    2693              :              * decoding so it knows when to cleanup temporary data.
    2694              :              */
    2695       470061 :             if (ndone + nthispage == ntuples)
    2696       456333 :                 xlrec->flags |= XLH_INSERT_LAST_IN_MULTI;
    2697              : 
    2698       470061 :             if (init)
    2699              :             {
    2700        15826 :                 info |= XLOG_HEAP_INIT_PAGE;
    2701        15826 :                 bufflags |= REGBUF_WILL_INIT;
    2702              :             }
    2703              : 
    2704              :             /*
    2705              :              * If we're doing logical decoding, include the new tuple data
    2706              :              * even if we take a full-page image of the page.
    2707              :              */
    2708       470061 :             if (need_tuple_data)
    2709           72 :                 bufflags |= REGBUF_KEEP_DATA;
    2710              : 
    2711       470061 :             XLogBeginInsert();
    2712       470061 :             XLogRegisterData(xlrec, tupledata - scratch.data);
    2713       470061 :             XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
    2714       470061 :             if (all_frozen_set)
    2715           17 :                 XLogRegisterBuffer(1, vmbuffer, 0);
    2716              : 
    2717       470061 :             XLogRegisterBufData(0, tupledata, totaldatalen);
    2718              : 
    2719              :             /* filtering by origin on a row level is much more efficient */
    2720       470061 :             XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    2721              : 
    2722       470061 :             recptr = XLogInsert(RM_HEAP2_ID, info);
    2723              : 
    2724       470061 :             PageSetLSN(page, recptr);
    2725       470061 :             if (all_frozen_set)
    2726              :             {
    2727              :                 Assert(BufferIsDirty(vmbuffer));
    2728           17 :                 PageSetLSN(BufferGetPage(vmbuffer), recptr);
    2729              :             }
    2730              :         }
    2731              : 
    2732       473961 :         END_CRIT_SECTION();
    2733              : 
    2734       473961 :         if (all_frozen_set)
    2735         1665 :             LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
    2736              : 
    2737       473961 :         UnlockReleaseBuffer(buffer);
    2738       473961 :         ndone += nthispage;
    2739              : 
    2740              :         /*
    2741              :          * NB: Only release vmbuffer after inserting all tuples - it's fairly
    2742              :          * likely that we'll insert into subsequent heap pages that are likely
    2743              :          * to use the same vm page.
    2744              :          */
    2745              :     }
    2746              : 
    2747              :     /* We're done with inserting all tuples, so release the last vmbuffer. */
    2748       456790 :     if (vmbuffer != InvalidBuffer)
    2749         3847 :         ReleaseBuffer(vmbuffer);
    2750              : 
    2751              :     /*
    2752              :      * We're done with the actual inserts.  Check for conflicts again, to
    2753              :      * ensure that all rw-conflicts in to these inserts are detected.  Without
    2754              :      * this final check, a sequential scan of the heap may have locked the
    2755              :      * table after the "before" check, missing one opportunity to detect the
    2756              :      * conflict, and then scanned the table before the new tuples were there,
    2757              :      * missing the other chance to detect the conflict.
    2758              :      *
    2759              :      * For heap inserts, we only need to check for table-level SSI locks. Our
    2760              :      * new tuples can't possibly conflict with existing tuple locks, and heap
    2761              :      * page locks are only consolidated versions of tuple locks; they do not
    2762              :      * lock "gaps" as index page locks do.  So we don't need to specify a
    2763              :      * buffer when making the call.
    2764              :      */
    2765       456790 :     CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
    2766              : 
    2767              :     /*
    2768              :      * If tuples are cacheable, mark them for invalidation from the caches in
    2769              :      * case we abort.  Note it is OK to do this after releasing the buffer,
    2770              :      * because the heaptuples data structure is all in local memory, not in
    2771              :      * the shared buffer.
    2772              :      */
    2773       456790 :     if (IsCatalogRelation(relation))
    2774              :     {
    2775      1512578 :         for (i = 0; i < ntuples; i++)
    2776      1057248 :             CacheInvalidateHeapTuple(relation, heaptuples[i], NULL);
    2777              :     }
    2778              : 
    2779              :     /* copy t_self fields back to the caller's slots */
    2780      2227020 :     for (i = 0; i < ntuples; i++)
    2781      1770230 :         slots[i]->tts_tid = heaptuples[i]->t_self;
    2782              : 
    2783       456790 :     pgstat_count_heap_insert(relation, ntuples);
    2784       456790 : }
    2785              : 
    2786              : /*
    2787              :  *  simple_heap_insert - insert a tuple
    2788              :  *
    2789              :  * Currently, this routine differs from heap_insert only in supplying
    2790              :  * a default command ID and not allowing access to the speedup options.
    2791              :  *
    2792              :  * This should be used rather than using heap_insert directly in most places
    2793              :  * where we are modifying system catalogs.
    2794              :  */
    2795              : void
    2796       992104 : simple_heap_insert(Relation relation, HeapTuple tup)
    2797              : {
    2798       992104 :     heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
    2799       992104 : }
    2800              : 
    2801              : /*
    2802              :  * Given infomask/infomask2, compute the bits that must be saved in the
    2803              :  * "infobits" field of xl_heap_delete, xl_heap_update, xl_heap_lock,
    2804              :  * xl_heap_lock_updated WAL records.
    2805              :  *
    2806              :  * See fix_infomask_from_infobits.
    2807              :  */
    2808              : static uint8
    2809      2392921 : compute_infobits(uint16 infomask, uint16 infomask2)
    2810              : {
    2811              :     return
    2812      2392921 :         ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
    2813      2392921 :         ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
    2814      2392921 :         ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
    2815              :     /* note we ignore HEAP_XMAX_SHR_LOCK here */
    2816      4785842 :         ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
    2817              :         ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
    2818      2392921 :          XLHL_KEYS_UPDATED : 0);
    2819              : }
    2820              : 
    2821              : /*
    2822              :  * Given two versions of the same t_infomask for a tuple, compare them and
    2823              :  * return whether the relevant status for a tuple Xmax has changed.  This is
    2824              :  * used after a buffer lock has been released and reacquired: we want to ensure
    2825              :  * that the tuple state continues to be the same it was when we previously
    2826              :  * examined it.
    2827              :  *
    2828              :  * Note the Xmax field itself must be compared separately.
    2829              :  */
    2830              : static inline bool
    2831         5398 : xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
    2832              : {
    2833         5398 :     const uint16 interesting =
    2834              :         HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
    2835              : 
    2836         5398 :     if ((new_infomask & interesting) != (old_infomask & interesting))
    2837           17 :         return true;
    2838              : 
    2839         5381 :     return false;
    2840              : }
    2841              : 
    2842              : /*
    2843              :  *  heap_delete - delete a tuple
    2844              :  *
    2845              :  * See table_tuple_delete() for an explanation of the parameters, except that
    2846              :  * this routine directly takes a tuple rather than a slot.
    2847              :  *
    2848              :  * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
    2849              :  * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
    2850              :  * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
    2851              :  * generated by another transaction).
    2852              :  */
    2853              : TM_Result
    2854      1844687 : heap_delete(Relation relation, const ItemPointerData *tid,
    2855              :             CommandId cid, Snapshot crosscheck, bool wait,
    2856              :             TM_FailureData *tmfd, bool changingPart)
    2857              : {
    2858              :     TM_Result   result;
    2859      1844687 :     TransactionId xid = GetCurrentTransactionId();
    2860              :     ItemId      lp;
    2861              :     HeapTupleData tp;
    2862              :     Page        page;
    2863              :     BlockNumber block;
    2864              :     Buffer      buffer;
    2865      1844687 :     Buffer      vmbuffer = InvalidBuffer;
    2866              :     TransactionId new_xmax;
    2867              :     uint16      new_infomask,
    2868              :                 new_infomask2;
    2869      1844687 :     bool        have_tuple_lock = false;
    2870              :     bool        iscombo;
    2871      1844687 :     bool        all_visible_cleared = false;
    2872      1844687 :     HeapTuple   old_key_tuple = NULL;   /* replica identity of the tuple */
    2873      1844687 :     bool        old_key_copied = false;
    2874              : 
    2875              :     Assert(ItemPointerIsValid(tid));
    2876              : 
    2877      1844687 :     AssertHasSnapshotForToast(relation);
    2878              : 
    2879              :     /*
    2880              :      * Forbid this during a parallel operation, lest it allocate a combo CID.
    2881              :      * Other workers might need that combo CID for visibility checks, and we
    2882              :      * have no provision for broadcasting it to them.
    2883              :      */
    2884      1844687 :     if (IsInParallelMode())
    2885            0 :         ereport(ERROR,
    2886              :                 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
    2887              :                  errmsg("cannot delete tuples during a parallel operation")));
    2888              : 
    2889      1844687 :     block = ItemPointerGetBlockNumber(tid);
    2890      1844687 :     buffer = ReadBuffer(relation, block);
    2891      1844687 :     page = BufferGetPage(buffer);
    2892              : 
    2893              :     /*
    2894              :      * Before locking the buffer, pin the visibility map page if it appears to
    2895              :      * be necessary.  Since we haven't got the lock yet, someone else might be
    2896              :      * in the middle of changing this, so we'll need to recheck after we have
    2897              :      * the lock.
    2898              :      */
    2899      1844687 :     if (PageIsAllVisible(page))
    2900          259 :         visibilitymap_pin(relation, block, &vmbuffer);
    2901              : 
    2902      1844687 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    2903              : 
    2904      1844687 :     lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
    2905              :     Assert(ItemIdIsNormal(lp));
    2906              : 
    2907      1844687 :     tp.t_tableOid = RelationGetRelid(relation);
    2908      1844687 :     tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
    2909      1844687 :     tp.t_len = ItemIdGetLength(lp);
    2910      1844687 :     tp.t_self = *tid;
    2911              : 
    2912            1 : l1:
    2913              : 
    2914              :     /*
    2915              :      * If we didn't pin the visibility map page and the page has become all
    2916              :      * visible while we were busy locking the buffer, we'll have to unlock and
    2917              :      * re-lock, to avoid holding the buffer lock across an I/O.  That's a bit
    2918              :      * unfortunate, but hopefully shouldn't happen often.
    2919              :      */
    2920      1844688 :     if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
    2921              :     {
    2922            0 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    2923            0 :         visibilitymap_pin(relation, block, &vmbuffer);
    2924            0 :         LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    2925              :     }
    2926              : 
    2927      1844688 :     result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
    2928              : 
    2929      1844688 :     if (result == TM_Invisible)
    2930              :     {
    2931            0 :         UnlockReleaseBuffer(buffer);
    2932            0 :         ereport(ERROR,
    2933              :                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
    2934              :                  errmsg("attempted to delete invisible tuple")));
    2935              :     }
    2936      1844688 :     else if (result == TM_BeingModified && wait)
    2937              :     {
    2938              :         TransactionId xwait;
    2939              :         uint16      infomask;
    2940              : 
    2941              :         /* must copy state data before unlocking buffer */
    2942        40594 :         xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
    2943        40594 :         infomask = tp.t_data->t_infomask;
    2944              : 
    2945              :         /*
    2946              :          * Sleep until concurrent transaction ends -- except when there's a
    2947              :          * single locker and it's our own transaction.  Note we don't care
    2948              :          * which lock mode the locker has, because we need the strongest one.
    2949              :          *
    2950              :          * Before sleeping, we need to acquire tuple lock to establish our
    2951              :          * priority for the tuple (see heap_lock_tuple).  LockTuple will
    2952              :          * release us when we are next-in-line for the tuple.
    2953              :          *
    2954              :          * If we are forced to "start over" below, we keep the tuple lock;
    2955              :          * this arranges that we stay at the head of the line while rechecking
    2956              :          * tuple state.
    2957              :          */
    2958        40594 :         if (infomask & HEAP_XMAX_IS_MULTI)
    2959              :         {
    2960            8 :             bool        current_is_member = false;
    2961              : 
    2962            8 :             if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
    2963              :                                         LockTupleExclusive, &current_is_member))
    2964              :             {
    2965            8 :                 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    2966              : 
    2967              :                 /*
    2968              :                  * Acquire the lock, if necessary (but skip it when we're
    2969              :                  * requesting a lock and already have one; avoids deadlock).
    2970              :                  */
    2971            8 :                 if (!current_is_member)
    2972            6 :                     heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
    2973              :                                          LockWaitBlock, &have_tuple_lock);
    2974              : 
    2975              :                 /* wait for multixact */
    2976            8 :                 MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
    2977              :                                 relation, &(tp.t_self), XLTW_Delete,
    2978              :                                 NULL);
    2979            8 :                 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    2980              : 
    2981              :                 /*
    2982              :                  * If xwait had just locked the tuple then some other xact
    2983              :                  * could update this tuple before we get to this point.  Check
    2984              :                  * for xmax change, and start over if so.
    2985              :                  *
    2986              :                  * We also must start over if we didn't pin the VM page, and
    2987              :                  * the page has become all visible.
    2988              :                  */
    2989           16 :                 if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
    2990           16 :                     xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
    2991            8 :                     !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
    2992              :                                          xwait))
    2993            0 :                     goto l1;
    2994              :             }
    2995              : 
    2996              :             /*
    2997              :              * You might think the multixact is necessarily done here, but not
    2998              :              * so: it could have surviving members, namely our own xact or
    2999              :              * other subxacts of this backend.  It is legal for us to delete
    3000              :              * the tuple in either case, however (the latter case is
    3001              :              * essentially a situation of upgrading our former shared lock to
    3002              :              * exclusive).  We don't bother changing the on-disk hint bits
    3003              :              * since we are about to overwrite the xmax altogether.
    3004              :              */
    3005              :         }
    3006        40586 :         else if (!TransactionIdIsCurrentTransactionId(xwait))
    3007              :         {
    3008              :             /*
    3009              :              * Wait for regular transaction to end; but first, acquire tuple
    3010              :              * lock.
    3011              :              */
    3012           52 :             LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3013           52 :             heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
    3014              :                                  LockWaitBlock, &have_tuple_lock);
    3015           52 :             XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
    3016           48 :             LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3017              : 
    3018              :             /*
    3019              :              * xwait is done, but if xwait had just locked the tuple then some
    3020              :              * other xact could update this tuple before we get to this point.
    3021              :              * Check for xmax change, and start over if so.
    3022              :              *
    3023              :              * We also must start over if we didn't pin the VM page, and the
    3024              :              * page has become all visible.
    3025              :              */
    3026           96 :             if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
    3027           95 :                 xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
    3028           47 :                 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
    3029              :                                      xwait))
    3030            1 :                 goto l1;
    3031              : 
    3032              :             /* Otherwise check if it committed or aborted */
    3033           47 :             UpdateXmaxHintBits(tp.t_data, buffer, xwait);
    3034              :         }
    3035              : 
    3036              :         /*
    3037              :          * We may overwrite if previous xmax aborted, or if it committed but
    3038              :          * only locked the tuple without updating it.
    3039              :          */
    3040        81158 :         if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
    3041        40600 :             HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
    3042           31 :             HeapTupleHeaderIsOnlyLocked(tp.t_data))
    3043        40562 :             result = TM_Ok;
    3044           27 :         else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
    3045           23 :             result = TM_Updated;
    3046              :         else
    3047            4 :             result = TM_Deleted;
    3048              :     }
    3049              : 
    3050              :     /* sanity check the result HeapTupleSatisfiesUpdate() and the logic above */
    3051              :     if (result != TM_Ok)
    3052              :     {
    3053              :         Assert(result == TM_SelfModified ||
    3054              :                result == TM_Updated ||
    3055              :                result == TM_Deleted ||
    3056              :                result == TM_BeingModified);
    3057              :         Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
    3058              :         Assert(result != TM_Updated ||
    3059              :                !ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid));
    3060              :     }
    3061              : 
    3062      1844683 :     if (crosscheck != InvalidSnapshot && result == TM_Ok)
    3063              :     {
    3064              :         /* Perform additional check for transaction-snapshot mode RI updates */
    3065            1 :         if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
    3066            1 :             result = TM_Updated;
    3067              :     }
    3068              : 
    3069      1844683 :     if (result != TM_Ok)
    3070              :     {
    3071           69 :         tmfd->ctid = tp.t_data->t_ctid;
    3072           69 :         tmfd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
    3073           69 :         if (result == TM_SelfModified)
    3074           28 :             tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
    3075              :         else
    3076           41 :             tmfd->cmax = InvalidCommandId;
    3077           69 :         UnlockReleaseBuffer(buffer);
    3078           69 :         if (have_tuple_lock)
    3079           27 :             UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
    3080           69 :         if (vmbuffer != InvalidBuffer)
    3081            0 :             ReleaseBuffer(vmbuffer);
    3082           69 :         return result;
    3083              :     }
    3084              : 
    3085              :     /*
    3086              :      * We're about to do the actual delete -- check for conflict first, to
    3087              :      * avoid possibly having to roll back work we've just done.
    3088              :      *
    3089              :      * This is safe without a recheck as long as there is no possibility of
    3090              :      * another process scanning the page between this check and the delete
    3091              :      * being visible to the scan (i.e., an exclusive buffer content lock is
    3092              :      * continuously held from this point until the tuple delete is visible).
    3093              :      */
    3094      1844614 :     CheckForSerializableConflictIn(relation, tid, BufferGetBlockNumber(buffer));
    3095              : 
    3096              :     /* replace cid with a combo CID if necessary */
    3097      1844600 :     HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
    3098              : 
    3099              :     /*
    3100              :      * Compute replica identity tuple before entering the critical section so
    3101              :      * we don't PANIC upon a memory allocation failure.
    3102              :      */
    3103      1844600 :     old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
    3104              : 
    3105              :     /*
    3106              :      * If this is the first possibly-multixact-able operation in the current
    3107              :      * transaction, set my per-backend OldestMemberMXactId setting. We can be
    3108              :      * certain that the transaction will never become a member of any older
    3109              :      * MultiXactIds than that.  (We have to do this even if we end up just
    3110              :      * using our own TransactionId below, since some other backend could
    3111              :      * incorporate our XID into a MultiXact immediately afterwards.)
    3112              :      */
    3113      1844600 :     MultiXactIdSetOldestMember();
    3114              : 
    3115      1844600 :     compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
    3116      1844600 :                               tp.t_data->t_infomask, tp.t_data->t_infomask2,
    3117              :                               xid, LockTupleExclusive, true,
    3118              :                               &new_xmax, &new_infomask, &new_infomask2);
    3119              : 
    3120      1844600 :     START_CRIT_SECTION();
    3121              : 
    3122              :     /*
    3123              :      * If this transaction commits, the tuple will become DEAD sooner or
    3124              :      * later.  Set flag that this page is a candidate for pruning once our xid
    3125              :      * falls below the OldestXmin horizon.  If the transaction finally aborts,
    3126              :      * the subsequent page pruning will be a no-op and the hint will be
    3127              :      * cleared.
    3128              :      */
    3129      1844600 :     PageSetPrunable(page, xid);
    3130              : 
    3131      1844600 :     if (PageIsAllVisible(page))
    3132              :     {
    3133          259 :         all_visible_cleared = true;
    3134          259 :         PageClearAllVisible(page);
    3135          259 :         visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
    3136              :                             vmbuffer, VISIBILITYMAP_VALID_BITS);
    3137              :     }
    3138              : 
    3139              :     /* store transaction information of xact deleting the tuple */
    3140      1844600 :     tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    3141      1844600 :     tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    3142      1844600 :     tp.t_data->t_infomask |= new_infomask;
    3143      1844600 :     tp.t_data->t_infomask2 |= new_infomask2;
    3144      1844600 :     HeapTupleHeaderClearHotUpdated(tp.t_data);
    3145      1844600 :     HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
    3146      1844600 :     HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
    3147              :     /* Make sure there is no forward chain link in t_ctid */
    3148      1844600 :     tp.t_data->t_ctid = tp.t_self;
    3149              : 
    3150              :     /* Signal that this is actually a move into another partition */
    3151      1844600 :     if (changingPart)
    3152          638 :         HeapTupleHeaderSetMovedPartitions(tp.t_data);
    3153              : 
    3154      1844600 :     MarkBufferDirty(buffer);
    3155              : 
    3156              :     /*
    3157              :      * XLOG stuff
    3158              :      *
    3159              :      * NB: heap_abort_speculative() uses the same xlog record and replay
    3160              :      * routines.
    3161              :      */
    3162      1844600 :     if (RelationNeedsWAL(relation))
    3163              :     {
    3164              :         xl_heap_delete xlrec;
    3165              :         xl_heap_header xlhdr;
    3166              :         XLogRecPtr  recptr;
    3167              : 
    3168              :         /*
    3169              :          * For logical decode we need combo CIDs to properly decode the
    3170              :          * catalog
    3171              :          */
    3172      1761340 :         if (RelationIsAccessibleInLogicalDecoding(relation))
    3173         6403 :             log_heap_new_cid(relation, &tp);
    3174              : 
    3175      1761340 :         xlrec.flags = 0;
    3176      1761340 :         if (all_visible_cleared)
    3177          259 :             xlrec.flags |= XLH_DELETE_ALL_VISIBLE_CLEARED;
    3178      1761340 :         if (changingPart)
    3179          638 :             xlrec.flags |= XLH_DELETE_IS_PARTITION_MOVE;
    3180      3522680 :         xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
    3181      1761340 :                                               tp.t_data->t_infomask2);
    3182      1761340 :         xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
    3183      1761340 :         xlrec.xmax = new_xmax;
    3184              : 
    3185      1761340 :         if (old_key_tuple != NULL)
    3186              :         {
    3187        47019 :             if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
    3188          132 :                 xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE;
    3189              :             else
    3190        46887 :                 xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY;
    3191              :         }
    3192              : 
    3193      1761340 :         XLogBeginInsert();
    3194      1761340 :         XLogRegisterData(&xlrec, SizeOfHeapDelete);
    3195              : 
    3196      1761340 :         XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    3197              : 
    3198              :         /*
    3199              :          * Log replica identity of the deleted tuple if there is one
    3200              :          */
    3201      1761340 :         if (old_key_tuple != NULL)
    3202              :         {
    3203        47019 :             xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
    3204        47019 :             xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
    3205        47019 :             xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
    3206              : 
    3207        47019 :             XLogRegisterData(&xlhdr, SizeOfHeapHeader);
    3208        47019 :             XLogRegisterData((char *) old_key_tuple->t_data
    3209              :                              + SizeofHeapTupleHeader,
    3210        47019 :                              old_key_tuple->t_len
    3211              :                              - SizeofHeapTupleHeader);
    3212              :         }
    3213              : 
    3214              :         /* filtering by origin on a row level is much more efficient */
    3215      1761340 :         XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    3216              : 
    3217      1761340 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
    3218              : 
    3219      1761340 :         PageSetLSN(page, recptr);
    3220              :     }
    3221              : 
    3222      1844600 :     END_CRIT_SECTION();
    3223              : 
    3224      1844600 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3225              : 
    3226      1844600 :     if (vmbuffer != InvalidBuffer)
    3227          259 :         ReleaseBuffer(vmbuffer);
    3228              : 
    3229              :     /*
    3230              :      * If the tuple has toasted out-of-line attributes, we need to delete
    3231              :      * those items too.  We have to do this before releasing the buffer
    3232              :      * because we need to look at the contents of the tuple, but it's OK to
    3233              :      * release the content lock on the buffer first.
    3234              :      */
    3235      1848329 :     if (relation->rd_rel->relkind != RELKIND_RELATION &&
    3236         3742 :         relation->rd_rel->relkind != RELKIND_MATVIEW)
    3237              :     {
    3238              :         /* toast table entries should never be recursively toasted */
    3239              :         Assert(!HeapTupleHasExternal(&tp));
    3240              :     }
    3241      1840871 :     else if (HeapTupleHasExternal(&tp))
    3242          504 :         heap_toast_delete(relation, &tp, false);
    3243              : 
    3244              :     /*
    3245              :      * Mark tuple for invalidation from system caches at next command
    3246              :      * boundary. We have to do this before releasing the buffer because we
    3247              :      * need to look at the contents of the tuple.
    3248              :      */
    3249      1844600 :     CacheInvalidateHeapTuple(relation, &tp, NULL);
    3250              : 
    3251              :     /* Now we can release the buffer */
    3252      1844600 :     ReleaseBuffer(buffer);
    3253              : 
    3254              :     /*
    3255              :      * Release the lmgr tuple lock, if we had it.
    3256              :      */
    3257      1844600 :     if (have_tuple_lock)
    3258           26 :         UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
    3259              : 
    3260      1844600 :     pgstat_count_heap_delete(relation);
    3261              : 
    3262      1844600 :     if (old_key_tuple != NULL && old_key_copied)
    3263        46888 :         heap_freetuple(old_key_tuple);
    3264              : 
    3265      1844600 :     return TM_Ok;
    3266              : }
    3267              : 
    3268              : /*
    3269              :  *  simple_heap_delete - delete a tuple
    3270              :  *
    3271              :  * This routine may be used to delete a tuple when concurrent updates of
    3272              :  * the target tuple are not expected (for example, because we have a lock
    3273              :  * on the relation associated with the tuple).  Any failure is reported
    3274              :  * via ereport().
    3275              :  */
    3276              : void
    3277       793293 : simple_heap_delete(Relation relation, const ItemPointerData *tid)
    3278              : {
    3279              :     TM_Result   result;
    3280              :     TM_FailureData tmfd;
    3281              : 
    3282       793293 :     result = heap_delete(relation, tid,
    3283              :                          GetCurrentCommandId(true), InvalidSnapshot,
    3284              :                          true /* wait for commit */ ,
    3285              :                          &tmfd, false /* changingPart */ );
    3286       793293 :     switch (result)
    3287              :     {
    3288            0 :         case TM_SelfModified:
    3289              :             /* Tuple was already updated in current command? */
    3290            0 :             elog(ERROR, "tuple already updated by self");
    3291              :             break;
    3292              : 
    3293       793293 :         case TM_Ok:
    3294              :             /* done successfully */
    3295       793293 :             break;
    3296              : 
    3297            0 :         case TM_Updated:
    3298            0 :             elog(ERROR, "tuple concurrently updated");
    3299              :             break;
    3300              : 
    3301            0 :         case TM_Deleted:
    3302            0 :             elog(ERROR, "tuple concurrently deleted");
    3303              :             break;
    3304              : 
    3305            0 :         default:
    3306            0 :             elog(ERROR, "unrecognized heap_delete status: %u", result);
    3307              :             break;
    3308              :     }
    3309       793293 : }
    3310              : 
    3311              : /*
    3312              :  *  heap_update - replace a tuple
    3313              :  *
    3314              :  * See table_tuple_update() for an explanation of the parameters, except that
    3315              :  * this routine directly takes a tuple rather than a slot.
    3316              :  *
    3317              :  * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
    3318              :  * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
    3319              :  * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
    3320              :  * generated by another transaction).
    3321              :  */
    3322              : TM_Result
    3323       336595 : heap_update(Relation relation, const ItemPointerData *otid, HeapTuple newtup,
    3324              :             CommandId cid, Snapshot crosscheck, bool wait,
    3325              :             TM_FailureData *tmfd, LockTupleMode *lockmode,
    3326              :             TU_UpdateIndexes *update_indexes)
    3327              : {
    3328              :     TM_Result   result;
    3329       336595 :     TransactionId xid = GetCurrentTransactionId();
    3330              :     Bitmapset  *hot_attrs;
    3331              :     Bitmapset  *sum_attrs;
    3332              :     Bitmapset  *key_attrs;
    3333              :     Bitmapset  *id_attrs;
    3334              :     Bitmapset  *interesting_attrs;
    3335              :     Bitmapset  *modified_attrs;
    3336              :     ItemId      lp;
    3337              :     HeapTupleData oldtup;
    3338              :     HeapTuple   heaptup;
    3339       336595 :     HeapTuple   old_key_tuple = NULL;
    3340       336595 :     bool        old_key_copied = false;
    3341              :     Page        page,
    3342              :                 newpage;
    3343              :     BlockNumber block;
    3344              :     MultiXactStatus mxact_status;
    3345              :     Buffer      buffer,
    3346              :                 newbuf,
    3347       336595 :                 vmbuffer = InvalidBuffer,
    3348       336595 :                 vmbuffer_new = InvalidBuffer;
    3349              :     bool        need_toast;
    3350              :     Size        newtupsize,
    3351              :                 pagefree;
    3352       336595 :     bool        have_tuple_lock = false;
    3353              :     bool        iscombo;
    3354       336595 :     bool        use_hot_update = false;
    3355       336595 :     bool        summarized_update = false;
    3356              :     bool        key_intact;
    3357       336595 :     bool        all_visible_cleared = false;
    3358       336595 :     bool        all_visible_cleared_new = false;
    3359              :     bool        checked_lockers;
    3360              :     bool        locker_remains;
    3361       336595 :     bool        id_has_external = false;
    3362              :     TransactionId xmax_new_tuple,
    3363              :                 xmax_old_tuple;
    3364              :     uint16      infomask_old_tuple,
    3365              :                 infomask2_old_tuple,
    3366              :                 infomask_new_tuple,
    3367              :                 infomask2_new_tuple;
    3368              : 
    3369              :     Assert(ItemPointerIsValid(otid));
    3370              : 
    3371              :     /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
    3372              :     Assert(HeapTupleHeaderGetNatts(newtup->t_data) <=
    3373              :            RelationGetNumberOfAttributes(relation));
    3374              : 
    3375       336595 :     AssertHasSnapshotForToast(relation);
    3376              : 
    3377              :     /*
    3378              :      * Forbid this during a parallel operation, lest it allocate a combo CID.
    3379              :      * Other workers might need that combo CID for visibility checks, and we
    3380              :      * have no provision for broadcasting it to them.
    3381              :      */
    3382       336595 :     if (IsInParallelMode())
    3383            0 :         ereport(ERROR,
    3384              :                 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
    3385              :                  errmsg("cannot update tuples during a parallel operation")));
    3386              : 
    3387              : #ifdef USE_ASSERT_CHECKING
    3388              :     check_lock_if_inplace_updateable_rel(relation, otid, newtup);
    3389              : #endif
    3390              : 
    3391              :     /*
    3392              :      * Fetch the list of attributes to be checked for various operations.
    3393              :      *
    3394              :      * For HOT considerations, this is wasted effort if we fail to update or
    3395              :      * have to put the new tuple on a different page.  But we must compute the
    3396              :      * list before obtaining buffer lock --- in the worst case, if we are
    3397              :      * doing an update on one of the relevant system catalogs, we could
    3398              :      * deadlock if we try to fetch the list later.  In any case, the relcache
    3399              :      * caches the data so this is usually pretty cheap.
    3400              :      *
    3401              :      * We also need columns used by the replica identity and columns that are
    3402              :      * considered the "key" of rows in the table.
    3403              :      *
    3404              :      * Note that we get copies of each bitmap, so we need not worry about
    3405              :      * relcache flush happening midway through.
    3406              :      */
    3407       336595 :     hot_attrs = RelationGetIndexAttrBitmap(relation,
    3408              :                                            INDEX_ATTR_BITMAP_HOT_BLOCKING);
    3409       336595 :     sum_attrs = RelationGetIndexAttrBitmap(relation,
    3410              :                                            INDEX_ATTR_BITMAP_SUMMARIZED);
    3411       336595 :     key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY);
    3412       336595 :     id_attrs = RelationGetIndexAttrBitmap(relation,
    3413              :                                           INDEX_ATTR_BITMAP_IDENTITY_KEY);
    3414       336595 :     interesting_attrs = NULL;
    3415       336595 :     interesting_attrs = bms_add_members(interesting_attrs, hot_attrs);
    3416       336595 :     interesting_attrs = bms_add_members(interesting_attrs, sum_attrs);
    3417       336595 :     interesting_attrs = bms_add_members(interesting_attrs, key_attrs);
    3418       336595 :     interesting_attrs = bms_add_members(interesting_attrs, id_attrs);
    3419              : 
    3420       336595 :     block = ItemPointerGetBlockNumber(otid);
    3421       336595 :     INJECTION_POINT("heap_update-before-pin", NULL);
    3422       336595 :     buffer = ReadBuffer(relation, block);
    3423       336595 :     page = BufferGetPage(buffer);
    3424              : 
    3425              :     /*
    3426              :      * Before locking the buffer, pin the visibility map page if it appears to
    3427              :      * be necessary.  Since we haven't got the lock yet, someone else might be
    3428              :      * in the middle of changing this, so we'll need to recheck after we have
    3429              :      * the lock.
    3430              :      */
    3431       336595 :     if (PageIsAllVisible(page))
    3432         2116 :         visibilitymap_pin(relation, block, &vmbuffer);
    3433              : 
    3434       336595 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3435              : 
    3436       336595 :     lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
    3437              : 
    3438              :     /*
    3439              :      * Usually, a buffer pin and/or snapshot blocks pruning of otid, ensuring
    3440              :      * we see LP_NORMAL here.  When the otid origin is a syscache, we may have
    3441              :      * neither a pin nor a snapshot.  Hence, we may see other LP_ states, each
    3442              :      * of which indicates concurrent pruning.
    3443              :      *
    3444              :      * Failing with TM_Updated would be most accurate.  However, unlike other
    3445              :      * TM_Updated scenarios, we don't know the successor ctid in LP_UNUSED and
    3446              :      * LP_DEAD cases.  While the distinction between TM_Updated and TM_Deleted
    3447              :      * does matter to SQL statements UPDATE and MERGE, those SQL statements
    3448              :      * hold a snapshot that ensures LP_NORMAL.  Hence, the choice between
    3449              :      * TM_Updated and TM_Deleted affects only the wording of error messages.
    3450              :      * Settle on TM_Deleted, for two reasons.  First, it avoids complicating
    3451              :      * the specification of when tmfd->ctid is valid.  Second, it creates
    3452              :      * error log evidence that we took this branch.
    3453              :      *
    3454              :      * Since it's possible to see LP_UNUSED at otid, it's also possible to see
    3455              :      * LP_NORMAL for a tuple that replaced LP_UNUSED.  If it's a tuple for an
    3456              :      * unrelated row, we'll fail with "duplicate key value violates unique".
    3457              :      * XXX if otid is the live, newer version of the newtup row, we'll discard
    3458              :      * changes originating in versions of this catalog row after the version
    3459              :      * the caller got from syscache.  See syscache-update-pruned.spec.
    3460              :      */
    3461       336595 :     if (!ItemIdIsNormal(lp))
    3462              :     {
    3463              :         Assert(RelationSupportsSysCache(RelationGetRelid(relation)));
    3464              : 
    3465            1 :         UnlockReleaseBuffer(buffer);
    3466              :         Assert(!have_tuple_lock);
    3467            1 :         if (vmbuffer != InvalidBuffer)
    3468            1 :             ReleaseBuffer(vmbuffer);
    3469            1 :         tmfd->ctid = *otid;
    3470            1 :         tmfd->xmax = InvalidTransactionId;
    3471            1 :         tmfd->cmax = InvalidCommandId;
    3472            1 :         *update_indexes = TU_None;
    3473              : 
    3474            1 :         bms_free(hot_attrs);
    3475            1 :         bms_free(sum_attrs);
    3476            1 :         bms_free(key_attrs);
    3477            1 :         bms_free(id_attrs);
    3478              :         /* modified_attrs not yet initialized */
    3479            1 :         bms_free(interesting_attrs);
    3480            1 :         return TM_Deleted;
    3481              :     }
    3482              : 
    3483              :     /*
    3484              :      * Fill in enough data in oldtup for HeapDetermineColumnsInfo to work
    3485              :      * properly.
    3486              :      */
    3487       336594 :     oldtup.t_tableOid = RelationGetRelid(relation);
    3488       336594 :     oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
    3489       336594 :     oldtup.t_len = ItemIdGetLength(lp);
    3490       336594 :     oldtup.t_self = *otid;
    3491              : 
    3492              :     /* the new tuple is ready, except for this: */
    3493       336594 :     newtup->t_tableOid = RelationGetRelid(relation);
    3494              : 
    3495              :     /*
    3496              :      * Determine columns modified by the update.  Additionally, identify
    3497              :      * whether any of the unmodified replica identity key attributes in the
    3498              :      * old tuple is externally stored or not.  This is required because for
    3499              :      * such attributes the flattened value won't be WAL logged as part of the
    3500              :      * new tuple so we must include it as part of the old_key_tuple.  See
    3501              :      * ExtractReplicaIdentity.
    3502              :      */
    3503       336594 :     modified_attrs = HeapDetermineColumnsInfo(relation, interesting_attrs,
    3504              :                                               id_attrs, &oldtup,
    3505              :                                               newtup, &id_has_external);
    3506              : 
    3507              :     /*
    3508              :      * If we're not updating any "key" column, we can grab a weaker lock type.
    3509              :      * This allows for more concurrency when we are running simultaneously
    3510              :      * with foreign key checks.
    3511              :      *
    3512              :      * Note that if a column gets detoasted while executing the update, but
    3513              :      * the value ends up being the same, this test will fail and we will use
    3514              :      * the stronger lock.  This is acceptable; the important case to optimize
    3515              :      * is updates that don't manipulate key columns, not those that
    3516              :      * serendipitously arrive at the same key values.
    3517              :      */
    3518       336594 :     if (!bms_overlap(modified_attrs, key_attrs))
    3519              :     {
    3520       331037 :         *lockmode = LockTupleNoKeyExclusive;
    3521       331037 :         mxact_status = MultiXactStatusNoKeyUpdate;
    3522       331037 :         key_intact = true;
    3523              : 
    3524              :         /*
    3525              :          * If this is the first possibly-multixact-able operation in the
    3526              :          * current transaction, set my per-backend OldestMemberMXactId
    3527              :          * setting. We can be certain that the transaction will never become a
    3528              :          * member of any older MultiXactIds than that.  (We have to do this
    3529              :          * even if we end up just using our own TransactionId below, since
    3530              :          * some other backend could incorporate our XID into a MultiXact
    3531              :          * immediately afterwards.)
    3532              :          */
    3533       331037 :         MultiXactIdSetOldestMember();
    3534              :     }
    3535              :     else
    3536              :     {
    3537         5557 :         *lockmode = LockTupleExclusive;
    3538         5557 :         mxact_status = MultiXactStatusUpdate;
    3539         5557 :         key_intact = false;
    3540              :     }
    3541              : 
    3542              :     /*
    3543              :      * Note: beyond this point, use oldtup not otid to refer to old tuple.
    3544              :      * otid may very well point at newtup->t_self, which we will overwrite
    3545              :      * with the new tuple's location, so there's great risk of confusion if we
    3546              :      * use otid anymore.
    3547              :      */
    3548              : 
    3549            1 : l2:
    3550       336595 :     checked_lockers = false;
    3551       336595 :     locker_remains = false;
    3552       336595 :     result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
    3553              : 
    3554              :     /* see below about the "no wait" case */
    3555              :     Assert(result != TM_BeingModified || wait);
    3556              : 
    3557       336595 :     if (result == TM_Invisible)
    3558              :     {
    3559            0 :         UnlockReleaseBuffer(buffer);
    3560            0 :         ereport(ERROR,
    3561              :                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
    3562              :                  errmsg("attempted to update invisible tuple")));
    3563              :     }
    3564       336595 :     else if (result == TM_BeingModified && wait)
    3565              :     {
    3566              :         TransactionId xwait;
    3567              :         uint16      infomask;
    3568        36443 :         bool        can_continue = false;
    3569              : 
    3570              :         /*
    3571              :          * XXX note that we don't consider the "no wait" case here.  This
    3572              :          * isn't a problem currently because no caller uses that case, but it
    3573              :          * should be fixed if such a caller is introduced.  It wasn't a
    3574              :          * problem previously because this code would always wait, but now
    3575              :          * that some tuple locks do not conflict with one of the lock modes we
    3576              :          * use, it is possible that this case is interesting to handle
    3577              :          * specially.
    3578              :          *
    3579              :          * This may cause failures with third-party code that calls
    3580              :          * heap_update directly.
    3581              :          */
    3582              : 
    3583              :         /* must copy state data before unlocking buffer */
    3584        36443 :         xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
    3585        36443 :         infomask = oldtup.t_data->t_infomask;
    3586              : 
    3587              :         /*
    3588              :          * Now we have to do something about the existing locker.  If it's a
    3589              :          * multi, sleep on it; we might be awakened before it is completely
    3590              :          * gone (or even not sleep at all in some cases); we need to preserve
    3591              :          * it as locker, unless it is gone completely.
    3592              :          *
    3593              :          * If it's not a multi, we need to check for sleeping conditions
    3594              :          * before actually going to sleep.  If the update doesn't conflict
    3595              :          * with the locks, we just continue without sleeping (but making sure
    3596              :          * it is preserved).
    3597              :          *
    3598              :          * Before sleeping, we need to acquire tuple lock to establish our
    3599              :          * priority for the tuple (see heap_lock_tuple).  LockTuple will
    3600              :          * release us when we are next-in-line for the tuple.  Note we must
    3601              :          * not acquire the tuple lock until we're sure we're going to sleep;
    3602              :          * otherwise we're open for race conditions with other transactions
    3603              :          * holding the tuple lock which sleep on us.
    3604              :          *
    3605              :          * If we are forced to "start over" below, we keep the tuple lock;
    3606              :          * this arranges that we stay at the head of the line while rechecking
    3607              :          * tuple state.
    3608              :          */
    3609        36443 :         if (infomask & HEAP_XMAX_IS_MULTI)
    3610              :         {
    3611              :             TransactionId update_xact;
    3612              :             int         remain;
    3613          179 :             bool        current_is_member = false;
    3614              : 
    3615          179 :             if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
    3616              :                                         *lockmode, &current_is_member))
    3617              :             {
    3618            8 :                 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3619              : 
    3620              :                 /*
    3621              :                  * Acquire the lock, if necessary (but skip it when we're
    3622              :                  * requesting a lock and already have one; avoids deadlock).
    3623              :                  */
    3624            8 :                 if (!current_is_member)
    3625            0 :                     heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
    3626              :                                          LockWaitBlock, &have_tuple_lock);
    3627              : 
    3628              :                 /* wait for multixact */
    3629            8 :                 MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
    3630              :                                 relation, &oldtup.t_self, XLTW_Update,
    3631              :                                 &remain);
    3632            8 :                 checked_lockers = true;
    3633            8 :                 locker_remains = remain != 0;
    3634            8 :                 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3635              : 
    3636              :                 /*
    3637              :                  * If xwait had just locked the tuple then some other xact
    3638              :                  * could update this tuple before we get to this point.  Check
    3639              :                  * for xmax change, and start over if so.
    3640              :                  */
    3641            8 :                 if (xmax_infomask_changed(oldtup.t_data->t_infomask,
    3642            8 :                                           infomask) ||
    3643            8 :                     !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
    3644              :                                          xwait))
    3645            0 :                     goto l2;
    3646              :             }
    3647              : 
    3648              :             /*
    3649              :              * Note that the multixact may not be done by now.  It could have
    3650              :              * surviving members; our own xact or other subxacts of this
    3651              :              * backend, and also any other concurrent transaction that locked
    3652              :              * the tuple with LockTupleKeyShare if we only got
    3653              :              * LockTupleNoKeyExclusive.  If this is the case, we have to be
    3654              :              * careful to mark the updated tuple with the surviving members in
    3655              :              * Xmax.
    3656              :              *
    3657              :              * Note that there could have been another update in the
    3658              :              * MultiXact. In that case, we need to check whether it committed
    3659              :              * or aborted. If it aborted we are safe to update it again;
    3660              :              * otherwise there is an update conflict, and we have to return
    3661              :              * TableTuple{Deleted, Updated} below.
    3662              :              *
    3663              :              * In the LockTupleExclusive case, we still need to preserve the
    3664              :              * surviving members: those would include the tuple locks we had
    3665              :              * before this one, which are important to keep in case this
    3666              :              * subxact aborts.
    3667              :              */
    3668          179 :             if (!HEAP_XMAX_IS_LOCKED_ONLY(oldtup.t_data->t_infomask))
    3669            8 :                 update_xact = HeapTupleGetUpdateXid(oldtup.t_data);
    3670              :             else
    3671          171 :                 update_xact = InvalidTransactionId;
    3672              : 
    3673              :             /*
    3674              :              * There was no UPDATE in the MultiXact; or it aborted. No
    3675              :              * TransactionIdIsInProgress() call needed here, since we called
    3676              :              * MultiXactIdWait() above.
    3677              :              */
    3678          187 :             if (!TransactionIdIsValid(update_xact) ||
    3679            8 :                 TransactionIdDidAbort(update_xact))
    3680          172 :                 can_continue = true;
    3681              :         }
    3682        36264 :         else if (TransactionIdIsCurrentTransactionId(xwait))
    3683              :         {
    3684              :             /*
    3685              :              * The only locker is ourselves; we can avoid grabbing the tuple
    3686              :              * lock here, but must preserve our locking information.
    3687              :              */
    3688        36153 :             checked_lockers = true;
    3689        36153 :             locker_remains = true;
    3690        36153 :             can_continue = true;
    3691              :         }
    3692          111 :         else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
    3693              :         {
    3694              :             /*
    3695              :              * If it's just a key-share locker, and we're not changing the key
    3696              :              * columns, we don't need to wait for it to end; but we need to
    3697              :              * preserve it as locker.
    3698              :              */
    3699           29 :             checked_lockers = true;
    3700           29 :             locker_remains = true;
    3701           29 :             can_continue = true;
    3702              :         }
    3703              :         else
    3704              :         {
    3705              :             /*
    3706              :              * Wait for regular transaction to end; but first, acquire tuple
    3707              :              * lock.
    3708              :              */
    3709           82 :             LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3710           82 :             heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
    3711              :                                  LockWaitBlock, &have_tuple_lock);
    3712           82 :             XactLockTableWait(xwait, relation, &oldtup.t_self,
    3713              :                               XLTW_Update);
    3714           82 :             checked_lockers = true;
    3715           82 :             LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3716              : 
    3717              :             /*
    3718              :              * xwait is done, but if xwait had just locked the tuple then some
    3719              :              * other xact could update this tuple before we get to this point.
    3720              :              * Check for xmax change, and start over if so.
    3721              :              */
    3722          163 :             if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
    3723           81 :                 !TransactionIdEquals(xwait,
    3724              :                                      HeapTupleHeaderGetRawXmax(oldtup.t_data)))
    3725            1 :                 goto l2;
    3726              : 
    3727              :             /* Otherwise check if it committed or aborted */
    3728           81 :             UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
    3729           81 :             if (oldtup.t_data->t_infomask & HEAP_XMAX_INVALID)
    3730           22 :                 can_continue = true;
    3731              :         }
    3732              : 
    3733        36442 :         if (can_continue)
    3734        36376 :             result = TM_Ok;
    3735           66 :         else if (!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid))
    3736           61 :             result = TM_Updated;
    3737              :         else
    3738            5 :             result = TM_Deleted;
    3739              :     }
    3740              : 
    3741              :     /* Sanity check the result HeapTupleSatisfiesUpdate() and the logic above */
    3742              :     if (result != TM_Ok)
    3743              :     {
    3744              :         Assert(result == TM_SelfModified ||
    3745              :                result == TM_Updated ||
    3746              :                result == TM_Deleted ||
    3747              :                result == TM_BeingModified);
    3748              :         Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
    3749              :         Assert(result != TM_Updated ||
    3750              :                !ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
    3751              :     }
    3752              : 
    3753       336594 :     if (crosscheck != InvalidSnapshot && result == TM_Ok)
    3754              :     {
    3755              :         /* Perform additional check for transaction-snapshot mode RI updates */
    3756            1 :         if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
    3757            1 :             result = TM_Updated;
    3758              :     }
    3759              : 
    3760       336594 :     if (result != TM_Ok)
    3761              :     {
    3762          181 :         tmfd->ctid = oldtup.t_data->t_ctid;
    3763          181 :         tmfd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
    3764          181 :         if (result == TM_SelfModified)
    3765           69 :             tmfd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
    3766              :         else
    3767          112 :             tmfd->cmax = InvalidCommandId;
    3768          181 :         UnlockReleaseBuffer(buffer);
    3769          181 :         if (have_tuple_lock)
    3770           59 :             UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
    3771          181 :         if (vmbuffer != InvalidBuffer)
    3772            0 :             ReleaseBuffer(vmbuffer);
    3773          181 :         *update_indexes = TU_None;
    3774              : 
    3775          181 :         bms_free(hot_attrs);
    3776          181 :         bms_free(sum_attrs);
    3777          181 :         bms_free(key_attrs);
    3778          181 :         bms_free(id_attrs);
    3779          181 :         bms_free(modified_attrs);
    3780          181 :         bms_free(interesting_attrs);
    3781          181 :         return result;
    3782              :     }
    3783              : 
    3784              :     /*
    3785              :      * If we didn't pin the visibility map page and the page has become all
    3786              :      * visible while we were busy locking the buffer, or during some
    3787              :      * subsequent window during which we had it unlocked, we'll have to unlock
    3788              :      * and re-lock, to avoid holding the buffer lock across an I/O.  That's a
    3789              :      * bit unfortunate, especially since we'll now have to recheck whether the
    3790              :      * tuple has been locked or updated under us, but hopefully it won't
    3791              :      * happen very often.
    3792              :      */
    3793       336413 :     if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
    3794              :     {
    3795            0 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3796            0 :         visibilitymap_pin(relation, block, &vmbuffer);
    3797            0 :         LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3798            0 :         goto l2;
    3799              :     }
    3800              : 
    3801              :     /* Fill in transaction status data */
    3802              : 
    3803              :     /*
    3804              :      * If the tuple we're updating is locked, we need to preserve the locking
    3805              :      * info in the old tuple's Xmax.  Prepare a new Xmax value for this.
    3806              :      */
    3807       336413 :     compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
    3808       336413 :                               oldtup.t_data->t_infomask,
    3809       336413 :                               oldtup.t_data->t_infomask2,
    3810              :                               xid, *lockmode, true,
    3811              :                               &xmax_old_tuple, &infomask_old_tuple,
    3812              :                               &infomask2_old_tuple);
    3813              : 
    3814              :     /*
    3815              :      * And also prepare an Xmax value for the new copy of the tuple.  If there
    3816              :      * was no xmax previously, or there was one but all lockers are now gone,
    3817              :      * then use InvalidTransactionId; otherwise, get the xmax from the old
    3818              :      * tuple.  (In rare cases that might also be InvalidTransactionId and yet
    3819              :      * not have the HEAP_XMAX_INVALID bit set; that's fine.)
    3820              :      */
    3821       372767 :     if ((oldtup.t_data->t_infomask & HEAP_XMAX_INVALID) ||
    3822        72708 :         HEAP_LOCKED_UPGRADED(oldtup.t_data->t_infomask) ||
    3823        36183 :         (checked_lockers && !locker_remains))
    3824       300059 :         xmax_new_tuple = InvalidTransactionId;
    3825              :     else
    3826        36354 :         xmax_new_tuple = HeapTupleHeaderGetRawXmax(oldtup.t_data);
    3827              : 
    3828       336413 :     if (!TransactionIdIsValid(xmax_new_tuple))
    3829              :     {
    3830       300059 :         infomask_new_tuple = HEAP_XMAX_INVALID;
    3831       300059 :         infomask2_new_tuple = 0;
    3832              :     }
    3833              :     else
    3834              :     {
    3835              :         /*
    3836              :          * If we found a valid Xmax for the new tuple, then the infomask bits
    3837              :          * to use on the new tuple depend on what was there on the old one.
    3838              :          * Note that since we're doing an update, the only possibility is that
    3839              :          * the lockers had FOR KEY SHARE lock.
    3840              :          */
    3841        36354 :         if (oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI)
    3842              :         {
    3843          172 :             GetMultiXactIdHintBits(xmax_new_tuple, &infomask_new_tuple,
    3844              :                                    &infomask2_new_tuple);
    3845              :         }
    3846              :         else
    3847              :         {
    3848        36182 :             infomask_new_tuple = HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_LOCK_ONLY;
    3849        36182 :             infomask2_new_tuple = 0;
    3850              :         }
    3851              :     }
    3852              : 
    3853              :     /*
    3854              :      * Prepare the new tuple with the appropriate initial values of Xmin and
    3855              :      * Xmax, as well as initial infomask bits as computed above.
    3856              :      */
    3857       336413 :     newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
    3858       336413 :     newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
    3859       336413 :     HeapTupleHeaderSetXmin(newtup->t_data, xid);
    3860       336413 :     HeapTupleHeaderSetCmin(newtup->t_data, cid);
    3861       336413 :     newtup->t_data->t_infomask |= HEAP_UPDATED | infomask_new_tuple;
    3862       336413 :     newtup->t_data->t_infomask2 |= infomask2_new_tuple;
    3863       336413 :     HeapTupleHeaderSetXmax(newtup->t_data, xmax_new_tuple);
    3864              : 
    3865              :     /*
    3866              :      * Replace cid with a combo CID if necessary.  Note that we already put
    3867              :      * the plain cid into the new tuple.
    3868              :      */
    3869       336413 :     HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);
    3870              : 
    3871              :     /*
    3872              :      * If the toaster needs to be activated, OR if the new tuple will not fit
    3873              :      * on the same page as the old, then we need to release the content lock
    3874              :      * (but not the pin!) on the old tuple's buffer while we are off doing
    3875              :      * TOAST and/or table-file-extension work.  We must mark the old tuple to
    3876              :      * show that it's locked, else other processes may try to update it
    3877              :      * themselves.
    3878              :      *
    3879              :      * We need to invoke the toaster if there are already any out-of-line
    3880              :      * toasted values present, or if the new tuple is over-threshold.
    3881              :      */
    3882       336413 :     if (relation->rd_rel->relkind != RELKIND_RELATION &&
    3883            0 :         relation->rd_rel->relkind != RELKIND_MATVIEW)
    3884              :     {
    3885              :         /* toast table entries should never be recursively toasted */
    3886              :         Assert(!HeapTupleHasExternal(&oldtup));
    3887              :         Assert(!HeapTupleHasExternal(newtup));
    3888            0 :         need_toast = false;
    3889              :     }
    3890              :     else
    3891      1008736 :         need_toast = (HeapTupleHasExternal(&oldtup) ||
    3892       672323 :                       HeapTupleHasExternal(newtup) ||
    3893       335878 :                       newtup->t_len > TOAST_TUPLE_THRESHOLD);
    3894              : 
    3895       336413 :     pagefree = PageGetHeapFreeSpace(page);
    3896              : 
    3897       336413 :     newtupsize = MAXALIGN(newtup->t_len);
    3898              : 
    3899       336413 :     if (need_toast || newtupsize > pagefree)
    3900       157320 :     {
    3901              :         TransactionId xmax_lock_old_tuple;
    3902              :         uint16      infomask_lock_old_tuple,
    3903              :                     infomask2_lock_old_tuple;
    3904       157320 :         bool        cleared_all_frozen = false;
    3905              : 
    3906              :         /*
    3907              :          * To prevent concurrent sessions from updating the tuple, we have to
    3908              :          * temporarily mark it locked, while we release the page-level lock.
    3909              :          *
    3910              :          * To satisfy the rule that any xid potentially appearing in a buffer
    3911              :          * written out to disk, we unfortunately have to WAL log this
    3912              :          * temporary modification.  We can reuse xl_heap_lock for this
    3913              :          * purpose.  If we crash/error before following through with the
    3914              :          * actual update, xmax will be of an aborted transaction, allowing
    3915              :          * other sessions to proceed.
    3916              :          */
    3917              : 
    3918              :         /*
    3919              :          * Compute xmax / infomask appropriate for locking the tuple. This has
    3920              :          * to be done separately from the combo that's going to be used for
    3921              :          * updating, because the potentially created multixact would otherwise
    3922              :          * be wrong.
    3923              :          */
    3924       157320 :         compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
    3925       157320 :                                   oldtup.t_data->t_infomask,
    3926       157320 :                                   oldtup.t_data->t_infomask2,
    3927              :                                   xid, *lockmode, false,
    3928              :                                   &xmax_lock_old_tuple, &infomask_lock_old_tuple,
    3929              :                                   &infomask2_lock_old_tuple);
    3930              : 
    3931              :         Assert(HEAP_XMAX_IS_LOCKED_ONLY(infomask_lock_old_tuple));
    3932              : 
    3933       157320 :         START_CRIT_SECTION();
    3934              : 
    3935              :         /* Clear obsolete visibility flags ... */
    3936       157320 :         oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    3937       157320 :         oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    3938       157320 :         HeapTupleClearHotUpdated(&oldtup);
    3939              :         /* ... and store info about transaction updating this tuple */
    3940              :         Assert(TransactionIdIsValid(xmax_lock_old_tuple));
    3941       157320 :         HeapTupleHeaderSetXmax(oldtup.t_data, xmax_lock_old_tuple);
    3942       157320 :         oldtup.t_data->t_infomask |= infomask_lock_old_tuple;
    3943       157320 :         oldtup.t_data->t_infomask2 |= infomask2_lock_old_tuple;
    3944       157320 :         HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
    3945              : 
    3946              :         /* temporarily make it look not-updated, but locked */
    3947       157320 :         oldtup.t_data->t_ctid = oldtup.t_self;
    3948              : 
    3949              :         /*
    3950              :          * Clear all-frozen bit on visibility map if needed. We could
    3951              :          * immediately reset ALL_VISIBLE, but given that the WAL logging
    3952              :          * overhead would be unchanged, that doesn't seem necessarily
    3953              :          * worthwhile.
    3954              :          */
    3955       158442 :         if (PageIsAllVisible(page) &&
    3956         1122 :             visibilitymap_clear(relation, block, vmbuffer,
    3957              :                                 VISIBILITYMAP_ALL_FROZEN))
    3958          883 :             cleared_all_frozen = true;
    3959              : 
    3960       157320 :         MarkBufferDirty(buffer);
    3961              : 
    3962       157320 :         if (RelationNeedsWAL(relation))
    3963              :         {
    3964              :             xl_heap_lock xlrec;
    3965              :             XLogRecPtr  recptr;
    3966              : 
    3967       147181 :             XLogBeginInsert();
    3968       147181 :             XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    3969              : 
    3970       147181 :             xlrec.offnum = ItemPointerGetOffsetNumber(&oldtup.t_self);
    3971       147181 :             xlrec.xmax = xmax_lock_old_tuple;
    3972       294362 :             xlrec.infobits_set = compute_infobits(oldtup.t_data->t_infomask,
    3973       147181 :                                                   oldtup.t_data->t_infomask2);
    3974       147181 :             xlrec.flags =
    3975       147181 :                 cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
    3976       147181 :             XLogRegisterData(&xlrec, SizeOfHeapLock);
    3977       147181 :             recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
    3978       147181 :             PageSetLSN(page, recptr);
    3979              :         }
    3980              : 
    3981       157320 :         END_CRIT_SECTION();
    3982              : 
    3983       157320 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3984              : 
    3985              :         /*
    3986              :          * Let the toaster do its thing, if needed.
    3987              :          *
    3988              :          * Note: below this point, heaptup is the data we actually intend to
    3989              :          * store into the relation; newtup is the caller's original untoasted
    3990              :          * data.
    3991              :          */
    3992       157320 :         if (need_toast)
    3993              :         {
    3994              :             /* Note we always use WAL and FSM during updates */
    3995         1834 :             heaptup = heap_toast_insert_or_update(relation, newtup, &oldtup, 0);
    3996         1834 :             newtupsize = MAXALIGN(heaptup->t_len);
    3997              :         }
    3998              :         else
    3999       155486 :             heaptup = newtup;
    4000              : 
    4001              :         /*
    4002              :          * Now, do we need a new page for the tuple, or not?  This is a bit
    4003              :          * tricky since someone else could have added tuples to the page while
    4004              :          * we weren't looking.  We have to recheck the available space after
    4005              :          * reacquiring the buffer lock.  But don't bother to do that if the
    4006              :          * former amount of free space is still not enough; it's unlikely
    4007              :          * there's more free now than before.
    4008              :          *
    4009              :          * What's more, if we need to get a new page, we will need to acquire
    4010              :          * buffer locks on both old and new pages.  To avoid deadlock against
    4011              :          * some other backend trying to get the same two locks in the other
    4012              :          * order, we must be consistent about the order we get the locks in.
    4013              :          * We use the rule "lock the lower-numbered page of the relation
    4014              :          * first".  To implement this, we must do RelationGetBufferForTuple
    4015              :          * while not holding the lock on the old page, and we must rely on it
    4016              :          * to get the locks on both pages in the correct order.
    4017              :          *
    4018              :          * Another consideration is that we need visibility map page pin(s) if
    4019              :          * we will have to clear the all-visible flag on either page.  If we
    4020              :          * call RelationGetBufferForTuple, we rely on it to acquire any such
    4021              :          * pins; but if we don't, we have to handle that here.  Hence we need
    4022              :          * a loop.
    4023              :          */
    4024              :         for (;;)
    4025              :         {
    4026       157320 :             if (newtupsize > pagefree)
    4027              :             {
    4028              :                 /* It doesn't fit, must use RelationGetBufferForTuple. */
    4029       156643 :                 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
    4030              :                                                    buffer, 0, NULL,
    4031              :                                                    &vmbuffer_new, &vmbuffer,
    4032              :                                                    0);
    4033              :                 /* We're all done. */
    4034       156643 :                 break;
    4035              :             }
    4036              :             /* Acquire VM page pin if needed and we don't have it. */
    4037          677 :             if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
    4038            0 :                 visibilitymap_pin(relation, block, &vmbuffer);
    4039              :             /* Re-acquire the lock on the old tuple's page. */
    4040          677 :             LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    4041              :             /* Re-check using the up-to-date free space */
    4042          677 :             pagefree = PageGetHeapFreeSpace(page);
    4043          677 :             if (newtupsize > pagefree ||
    4044          677 :                 (vmbuffer == InvalidBuffer && PageIsAllVisible(page)))
    4045              :             {
    4046              :                 /*
    4047              :                  * Rats, it doesn't fit anymore, or somebody just now set the
    4048              :                  * all-visible flag.  We must now unlock and loop to avoid
    4049              :                  * deadlock.  Fortunately, this path should seldom be taken.
    4050              :                  */
    4051            0 :                 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    4052              :             }
    4053              :             else
    4054              :             {
    4055              :                 /* We're all done. */
    4056          677 :                 newbuf = buffer;
    4057          677 :                 break;
    4058              :             }
    4059              :         }
    4060              :     }
    4061              :     else
    4062              :     {
    4063              :         /* No TOAST work needed, and it'll fit on same page */
    4064       179093 :         newbuf = buffer;
    4065       179093 :         heaptup = newtup;
    4066              :     }
    4067              : 
    4068       336413 :     newpage = BufferGetPage(newbuf);
    4069              : 
    4070              :     /*
    4071              :      * We're about to do the actual update -- check for conflict first, to
    4072              :      * avoid possibly having to roll back work we've just done.
    4073              :      *
    4074              :      * This is safe without a recheck as long as there is no possibility of
    4075              :      * another process scanning the pages between this check and the update
    4076              :      * being visible to the scan (i.e., exclusive buffer content lock(s) are
    4077              :      * continuously held from this point until the tuple update is visible).
    4078              :      *
    4079              :      * For the new tuple the only check needed is at the relation level, but
    4080              :      * since both tuples are in the same relation and the check for oldtup
    4081              :      * will include checking the relation level, there is no benefit to a
    4082              :      * separate check for the new tuple.
    4083              :      */
    4084       336413 :     CheckForSerializableConflictIn(relation, &oldtup.t_self,
    4085              :                                    BufferGetBlockNumber(buffer));
    4086              : 
    4087              :     /*
    4088              :      * At this point newbuf and buffer are both pinned and locked, and newbuf
    4089              :      * has enough space for the new tuple.  If they are the same buffer, only
    4090              :      * one pin is held.
    4091              :      */
    4092              : 
    4093       336401 :     if (newbuf == buffer)
    4094              :     {
    4095              :         /*
    4096              :          * Since the new tuple is going into the same page, we might be able
    4097              :          * to do a HOT update.  Check if any of the index columns have been
    4098              :          * changed.
    4099              :          */
    4100       179758 :         if (!bms_overlap(modified_attrs, hot_attrs))
    4101              :         {
    4102       163843 :             use_hot_update = true;
    4103              : 
    4104              :             /*
    4105              :              * If none of the columns that are used in hot-blocking indexes
    4106              :              * were updated, we can apply HOT, but we do still need to check
    4107              :              * if we need to update the summarizing indexes, and update those
    4108              :              * indexes if the columns were updated, or we may fail to detect
    4109              :              * e.g. value bound changes in BRIN minmax indexes.
    4110              :              */
    4111       163843 :             if (bms_overlap(modified_attrs, sum_attrs))
    4112         2188 :                 summarized_update = true;
    4113              :         }
    4114              :     }
    4115              :     else
    4116              :     {
    4117              :         /* Set a hint that the old page could use prune/defrag */
    4118       156643 :         PageSetFull(page);
    4119              :     }
    4120              : 
    4121              :     /*
    4122              :      * Compute replica identity tuple before entering the critical section so
    4123              :      * we don't PANIC upon a memory allocation failure.
    4124              :      * ExtractReplicaIdentity() will return NULL if nothing needs to be
    4125              :      * logged.  Pass old key required as true only if the replica identity key
    4126              :      * columns are modified or it has external data.
    4127              :      */
    4128       336401 :     old_key_tuple = ExtractReplicaIdentity(relation, &oldtup,
    4129       336401 :                                            bms_overlap(modified_attrs, id_attrs) ||
    4130              :                                            id_has_external,
    4131       336401 :                                            &old_key_copied);
    4132              : 
    4133              :     /* NO EREPORT(ERROR) from here till changes are logged */
    4134       336401 :     START_CRIT_SECTION();
    4135              : 
    4136              :     /*
    4137              :      * If this transaction commits, the old tuple will become DEAD sooner or
    4138              :      * later.  Set flag that this page is a candidate for pruning once our xid
    4139              :      * falls below the OldestXmin horizon.  If the transaction finally aborts,
    4140              :      * the subsequent page pruning will be a no-op and the hint will be
    4141              :      * cleared.
    4142              :      *
    4143              :      * XXX Should we set hint on newbuf as well?  If the transaction aborts,
    4144              :      * there would be a prunable tuple in the newbuf; but for now we choose
    4145              :      * not to optimize for aborts.  Note that heap_xlog_update must be kept in
    4146              :      * sync if this decision changes.
    4147              :      */
    4148       336401 :     PageSetPrunable(page, xid);
    4149              : 
    4150       336401 :     if (use_hot_update)
    4151              :     {
    4152              :         /* Mark the old tuple as HOT-updated */
    4153       163843 :         HeapTupleSetHotUpdated(&oldtup);
    4154              :         /* And mark the new tuple as heap-only */
    4155       163843 :         HeapTupleSetHeapOnly(heaptup);
    4156              :         /* Mark the caller's copy too, in case different from heaptup */
    4157       163843 :         HeapTupleSetHeapOnly(newtup);
    4158              :     }
    4159              :     else
    4160              :     {
    4161              :         /* Make sure tuples are correctly marked as not-HOT */
    4162       172558 :         HeapTupleClearHotUpdated(&oldtup);
    4163       172558 :         HeapTupleClearHeapOnly(heaptup);
    4164       172558 :         HeapTupleClearHeapOnly(newtup);
    4165              :     }
    4166              : 
    4167       336401 :     RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
    4168              : 
    4169              : 
    4170              :     /* Clear obsolete visibility flags, possibly set by ourselves above... */
    4171       336401 :     oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    4172       336401 :     oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    4173              :     /* ... and store info about transaction updating this tuple */
    4174              :     Assert(TransactionIdIsValid(xmax_old_tuple));
    4175       336401 :     HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
    4176       336401 :     oldtup.t_data->t_infomask |= infomask_old_tuple;
    4177       336401 :     oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
    4178       336401 :     HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
    4179              : 
    4180              :     /* record address of new tuple in t_ctid of old one */
    4181       336401 :     oldtup.t_data->t_ctid = heaptup->t_self;
    4182              : 
    4183              :     /* clear PD_ALL_VISIBLE flags, reset all visibilitymap bits */
    4184       336401 :     if (PageIsAllVisible(page))
    4185              :     {
    4186         2115 :         all_visible_cleared = true;
    4187         2115 :         PageClearAllVisible(page);
    4188         2115 :         visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
    4189              :                             vmbuffer, VISIBILITYMAP_VALID_BITS);
    4190              :     }
    4191       336401 :     if (newbuf != buffer && PageIsAllVisible(newpage))
    4192              :     {
    4193          787 :         all_visible_cleared_new = true;
    4194          787 :         PageClearAllVisible(newpage);
    4195          787 :         visibilitymap_clear(relation, BufferGetBlockNumber(newbuf),
    4196              :                             vmbuffer_new, VISIBILITYMAP_VALID_BITS);
    4197              :     }
    4198              : 
    4199       336401 :     if (newbuf != buffer)
    4200       156643 :         MarkBufferDirty(newbuf);
    4201       336401 :     MarkBufferDirty(buffer);
    4202              : 
    4203              :     /* XLOG stuff */
    4204       336401 :     if (RelationNeedsWAL(relation))
    4205              :     {
    4206              :         XLogRecPtr  recptr;
    4207              : 
    4208              :         /*
    4209              :          * For logical decoding we need combo CIDs to properly decode the
    4210              :          * catalog.
    4211              :          */
    4212       324625 :         if (RelationIsAccessibleInLogicalDecoding(relation))
    4213              :         {
    4214         2623 :             log_heap_new_cid(relation, &oldtup);
    4215         2623 :             log_heap_new_cid(relation, heaptup);
    4216              :         }
    4217              : 
    4218       324625 :         recptr = log_heap_update(relation, buffer,
    4219              :                                  newbuf, &oldtup, heaptup,
    4220              :                                  old_key_tuple,
    4221              :                                  all_visible_cleared,
    4222              :                                  all_visible_cleared_new);
    4223       324625 :         if (newbuf != buffer)
    4224              :         {
    4225       146512 :             PageSetLSN(newpage, recptr);
    4226              :         }
    4227       324625 :         PageSetLSN(page, recptr);
    4228              :     }
    4229              : 
    4230       336401 :     END_CRIT_SECTION();
    4231              : 
    4232       336401 :     if (newbuf != buffer)
    4233       156643 :         LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
    4234       336401 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    4235              : 
    4236              :     /*
    4237              :      * Mark old tuple for invalidation from system caches at next command
    4238              :      * boundary, and mark the new tuple for invalidation in case we abort. We
    4239              :      * have to do this before releasing the buffer because oldtup is in the
    4240              :      * buffer.  (heaptup is all in local memory, but it's necessary to process
    4241              :      * both tuple versions in one call to inval.c so we can avoid redundant
    4242              :      * sinval messages.)
    4243              :      */
    4244       336401 :     CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
    4245              : 
    4246              :     /* Now we can release the buffer(s) */
    4247       336401 :     if (newbuf != buffer)
    4248       156643 :         ReleaseBuffer(newbuf);
    4249       336401 :     ReleaseBuffer(buffer);
    4250       336401 :     if (BufferIsValid(vmbuffer_new))
    4251          787 :         ReleaseBuffer(vmbuffer_new);
    4252       336401 :     if (BufferIsValid(vmbuffer))
    4253         2115 :         ReleaseBuffer(vmbuffer);
    4254              : 
    4255              :     /*
    4256              :      * Release the lmgr tuple lock, if we had it.
    4257              :      */
    4258       336401 :     if (have_tuple_lock)
    4259           22 :         UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
    4260              : 
    4261       336401 :     pgstat_count_heap_update(relation, use_hot_update, newbuf != buffer);
    4262              : 
    4263              :     /*
    4264              :      * If heaptup is a private copy, release it.  Don't forget to copy t_self
    4265              :      * back to the caller's image, too.
    4266              :      */
    4267       336401 :     if (heaptup != newtup)
    4268              :     {
    4269         1780 :         newtup->t_self = heaptup->t_self;
    4270         1780 :         heap_freetuple(heaptup);
    4271              :     }
    4272              : 
    4273              :     /*
    4274              :      * If it is a HOT update, the update may still need to update summarized
    4275              :      * indexes, lest we fail to update those summaries and get incorrect
    4276              :      * results (for example, minmax bounds of the block may change with this
    4277              :      * update).
    4278              :      */
    4279       336401 :     if (use_hot_update)
    4280              :     {
    4281       163843 :         if (summarized_update)
    4282         2188 :             *update_indexes = TU_Summarizing;
    4283              :         else
    4284       161655 :             *update_indexes = TU_None;
    4285              :     }
    4286              :     else
    4287       172558 :         *update_indexes = TU_All;
    4288              : 
    4289       336401 :     if (old_key_tuple != NULL && old_key_copied)
    4290           85 :         heap_freetuple(old_key_tuple);
    4291              : 
    4292       336401 :     bms_free(hot_attrs);
    4293       336401 :     bms_free(sum_attrs);
    4294       336401 :     bms_free(key_attrs);
    4295       336401 :     bms_free(id_attrs);
    4296       336401 :     bms_free(modified_attrs);
    4297       336401 :     bms_free(interesting_attrs);
    4298              : 
    4299       336401 :     return TM_Ok;
    4300              : }
    4301              : 
    4302              : #ifdef USE_ASSERT_CHECKING
    4303              : /*
    4304              :  * Confirm adequate lock held during heap_update(), per rules from
    4305              :  * README.tuplock section "Locking to write inplace-updated tables".
    4306              :  */
    4307              : static void
    4308              : check_lock_if_inplace_updateable_rel(Relation relation,
    4309              :                                      const ItemPointerData *otid,
    4310              :                                      HeapTuple newtup)
    4311              : {
    4312              :     /* LOCKTAG_TUPLE acceptable for any catalog */
    4313              :     switch (RelationGetRelid(relation))
    4314              :     {
    4315              :         case RelationRelationId:
    4316              :         case DatabaseRelationId:
    4317              :             {
    4318              :                 LOCKTAG     tuptag;
    4319              : 
    4320              :                 SET_LOCKTAG_TUPLE(tuptag,
    4321              :                                   relation->rd_lockInfo.lockRelId.dbId,
    4322              :                                   relation->rd_lockInfo.lockRelId.relId,
    4323              :                                   ItemPointerGetBlockNumber(otid),
    4324              :                                   ItemPointerGetOffsetNumber(otid));
    4325              :                 if (LockHeldByMe(&tuptag, InplaceUpdateTupleLock, false))
    4326              :                     return;
    4327              :             }
    4328              :             break;
    4329              :         default:
    4330              :             Assert(!IsInplaceUpdateRelation(relation));
    4331              :             return;
    4332              :     }
    4333              : 
    4334              :     switch (RelationGetRelid(relation))
    4335              :     {
    4336              :         case RelationRelationId:
    4337              :             {
    4338              :                 /* LOCKTAG_TUPLE or LOCKTAG_RELATION ok */
    4339              :                 Form_pg_class classForm = (Form_pg_class) GETSTRUCT(newtup);
    4340              :                 Oid         relid = classForm->oid;
    4341              :                 Oid         dbid;
    4342              :                 LOCKTAG     tag;
    4343              : 
    4344              :                 if (IsSharedRelation(relid))
    4345              :                     dbid = InvalidOid;
    4346              :                 else
    4347              :                     dbid = MyDatabaseId;
    4348              : 
    4349              :                 if (classForm->relkind == RELKIND_INDEX)
    4350              :                 {
    4351              :                     Relation    irel = index_open(relid, AccessShareLock);
    4352              : 
    4353              :                     SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
    4354              :                     index_close(irel, AccessShareLock);
    4355              :                 }
    4356              :                 else
    4357              :                     SET_LOCKTAG_RELATION(tag, dbid, relid);
    4358              : 
    4359              :                 if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, false) &&
    4360              :                     !LockHeldByMe(&tag, ShareRowExclusiveLock, true))
    4361              :                     elog(WARNING,
    4362              :                          "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
    4363              :                          NameStr(classForm->relname),
    4364              :                          relid,
    4365              :                          classForm->relkind,
    4366              :                          ItemPointerGetBlockNumber(otid),
    4367              :                          ItemPointerGetOffsetNumber(otid));
    4368              :             }
    4369              :             break;
    4370              :         case DatabaseRelationId:
    4371              :             {
    4372              :                 /* LOCKTAG_TUPLE required */
    4373              :                 Form_pg_database dbForm = (Form_pg_database) GETSTRUCT(newtup);
    4374              : 
    4375              :                 elog(WARNING,
    4376              :                      "missing lock on database \"%s\" (OID %u) @ TID (%u,%u)",
    4377              :                      NameStr(dbForm->datname),
    4378              :                      dbForm->oid,
    4379              :                      ItemPointerGetBlockNumber(otid),
    4380              :                      ItemPointerGetOffsetNumber(otid));
    4381              :             }
    4382              :             break;
    4383              :     }
    4384              : }
    4385              : 
    4386              : /*
    4387              :  * Confirm adequate relation lock held, per rules from README.tuplock section
    4388              :  * "Locking to write inplace-updated tables".
    4389              :  */
    4390              : static void
    4391              : check_inplace_rel_lock(HeapTuple oldtup)
    4392              : {
    4393              :     Form_pg_class classForm = (Form_pg_class) GETSTRUCT(oldtup);
    4394              :     Oid         relid = classForm->oid;
    4395              :     Oid         dbid;
    4396              :     LOCKTAG     tag;
    4397              : 
    4398              :     if (IsSharedRelation(relid))
    4399              :         dbid = InvalidOid;
    4400              :     else
    4401              :         dbid = MyDatabaseId;
    4402              : 
    4403              :     if (classForm->relkind == RELKIND_INDEX)
    4404              :     {
    4405              :         Relation    irel = index_open(relid, AccessShareLock);
    4406              : 
    4407              :         SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
    4408              :         index_close(irel, AccessShareLock);
    4409              :     }
    4410              :     else
    4411              :         SET_LOCKTAG_RELATION(tag, dbid, relid);
    4412              : 
    4413              :     if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, true))
    4414              :         elog(WARNING,
    4415              :              "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
    4416              :              NameStr(classForm->relname),
    4417              :              relid,
    4418              :              classForm->relkind,
    4419              :              ItemPointerGetBlockNumber(&oldtup->t_self),
    4420              :              ItemPointerGetOffsetNumber(&oldtup->t_self));
    4421              : }
    4422              : #endif
    4423              : 
    4424              : /*
    4425              :  * Check if the specified attribute's values are the same.  Subroutine for
    4426              :  * HeapDetermineColumnsInfo.
    4427              :  */
    4428              : static bool
    4429       873466 : heap_attr_equals(TupleDesc tupdesc, int attrnum, Datum value1, Datum value2,
    4430              :                  bool isnull1, bool isnull2)
    4431              : {
    4432              :     /*
    4433              :      * If one value is NULL and other is not, then they are certainly not
    4434              :      * equal
    4435              :      */
    4436       873466 :     if (isnull1 != isnull2)
    4437           60 :         return false;
    4438              : 
    4439              :     /*
    4440              :      * If both are NULL, they can be considered equal.
    4441              :      */
    4442       873406 :     if (isnull1)
    4443         6641 :         return true;
    4444              : 
    4445              :     /*
    4446              :      * We do simple binary comparison of the two datums.  This may be overly
    4447              :      * strict because there can be multiple binary representations for the
    4448              :      * same logical value.  But we should be OK as long as there are no false
    4449              :      * positives.  Using a type-specific equality operator is messy because
    4450              :      * there could be multiple notions of equality in different operator
    4451              :      * classes; furthermore, we cannot safely invoke user-defined functions
    4452              :      * while holding exclusive buffer lock.
    4453              :      */
    4454       866765 :     if (attrnum <= 0)
    4455              :     {
    4456              :         /* The only allowed system columns are OIDs, so do this */
    4457            0 :         return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
    4458              :     }
    4459              :     else
    4460              :     {
    4461              :         CompactAttribute *att;
    4462              : 
    4463              :         Assert(attrnum <= tupdesc->natts);
    4464       866765 :         att = TupleDescCompactAttr(tupdesc, attrnum - 1);
    4465       866765 :         return datumIsEqual(value1, value2, att->attbyval, att->attlen);
    4466              :     }
    4467              : }
    4468              : 
    4469              : /*
    4470              :  * Check which columns are being updated.
    4471              :  *
    4472              :  * Given an updated tuple, determine (and return into the output bitmapset),
    4473              :  * from those listed as interesting, the set of columns that changed.
    4474              :  *
    4475              :  * has_external indicates if any of the unmodified attributes (from those
    4476              :  * listed as interesting) of the old tuple is a member of external_cols and is
    4477              :  * stored externally.
    4478              :  */
    4479              : static Bitmapset *
    4480       336594 : HeapDetermineColumnsInfo(Relation relation,
    4481              :                          Bitmapset *interesting_cols,
    4482              :                          Bitmapset *external_cols,
    4483              :                          HeapTuple oldtup, HeapTuple newtup,
    4484              :                          bool *has_external)
    4485              : {
    4486              :     int         attidx;
    4487       336594 :     Bitmapset  *modified = NULL;
    4488       336594 :     TupleDesc   tupdesc = RelationGetDescr(relation);
    4489              : 
    4490       336594 :     attidx = -1;
    4491      1210060 :     while ((attidx = bms_next_member(interesting_cols, attidx)) >= 0)
    4492              :     {
    4493              :         /* attidx is zero-based, attrnum is the normal attribute number */
    4494       873466 :         AttrNumber  attrnum = attidx + FirstLowInvalidHeapAttributeNumber;
    4495              :         Datum       value1,
    4496              :                     value2;
    4497              :         bool        isnull1,
    4498              :                     isnull2;
    4499              : 
    4500              :         /*
    4501              :          * If it's a whole-tuple reference, say "not equal".  It's not really
    4502              :          * worth supporting this case, since it could only succeed after a
    4503              :          * no-op update, which is hardly a case worth optimizing for.
    4504              :          */
    4505       873466 :         if (attrnum == 0)
    4506              :         {
    4507            0 :             modified = bms_add_member(modified, attidx);
    4508       841722 :             continue;
    4509              :         }
    4510              : 
    4511              :         /*
    4512              :          * Likewise, automatically say "not equal" for any system attribute
    4513              :          * other than tableOID; we cannot expect these to be consistent in a
    4514              :          * HOT chain, or even to be set correctly yet in the new tuple.
    4515              :          */
    4516       873466 :         if (attrnum < 0)
    4517              :         {
    4518            0 :             if (attrnum != TableOidAttributeNumber)
    4519              :             {
    4520            0 :                 modified = bms_add_member(modified, attidx);
    4521            0 :                 continue;
    4522              :             }
    4523              :         }
    4524              : 
    4525              :         /*
    4526              :          * Extract the corresponding values.  XXX this is pretty inefficient
    4527              :          * if there are many indexed columns.  Should we do a single
    4528              :          * heap_deform_tuple call on each tuple, instead?   But that doesn't
    4529              :          * work for system columns ...
    4530              :          */
    4531       873466 :         value1 = heap_getattr(oldtup, attrnum, tupdesc, &isnull1);
    4532       873466 :         value2 = heap_getattr(newtup, attrnum, tupdesc, &isnull2);
    4533              : 
    4534       873466 :         if (!heap_attr_equals(tupdesc, attrnum, value1,
    4535              :                               value2, isnull1, isnull2))
    4536              :         {
    4537        34013 :             modified = bms_add_member(modified, attidx);
    4538        34013 :             continue;
    4539              :         }
    4540              : 
    4541              :         /*
    4542              :          * No need to check attributes that can't be stored externally. Note
    4543              :          * that system attributes can't be stored externally.
    4544              :          */
    4545       839453 :         if (attrnum < 0 || isnull1 ||
    4546       832812 :             TupleDescCompactAttr(tupdesc, attrnum - 1)->attlen != -1)
    4547       807709 :             continue;
    4548              : 
    4549              :         /*
    4550              :          * Check if the old tuple's attribute is stored externally and is a
    4551              :          * member of external_cols.
    4552              :          */
    4553        31749 :         if (VARATT_IS_EXTERNAL((varlena *) DatumGetPointer(value1)) &&
    4554            5 :             bms_is_member(attidx, external_cols))
    4555            2 :             *has_external = true;
    4556              :     }
    4557              : 
    4558       336594 :     return modified;
    4559              : }
    4560              : 
    4561              : /*
    4562              :  *  simple_heap_update - replace a tuple
    4563              :  *
    4564              :  * This routine may be used to update a tuple when concurrent updates of
    4565              :  * the target tuple are not expected (for example, because we have a lock
    4566              :  * on the relation associated with the tuple).  Any failure is reported
    4567              :  * via ereport().
    4568              :  */
    4569              : void
    4570       133850 : simple_heap_update(Relation relation, const ItemPointerData *otid, HeapTuple tup,
    4571              :                    TU_UpdateIndexes *update_indexes)
    4572              : {
    4573              :     TM_Result   result;
    4574              :     TM_FailureData tmfd;
    4575              :     LockTupleMode lockmode;
    4576              : 
    4577       133850 :     result = heap_update(relation, otid, tup,
    4578              :                          GetCurrentCommandId(true), InvalidSnapshot,
    4579              :                          true /* wait for commit */ ,
    4580              :                          &tmfd, &lockmode, update_indexes);
    4581       133850 :     switch (result)
    4582              :     {
    4583            0 :         case TM_SelfModified:
    4584              :             /* Tuple was already updated in current command? */
    4585            0 :             elog(ERROR, "tuple already updated by self");
    4586              :             break;
    4587              : 
    4588       133849 :         case TM_Ok:
    4589              :             /* done successfully */
    4590       133849 :             break;
    4591              : 
    4592            0 :         case TM_Updated:
    4593            0 :             elog(ERROR, "tuple concurrently updated");
    4594              :             break;
    4595              : 
    4596            1 :         case TM_Deleted:
    4597            1 :             elog(ERROR, "tuple concurrently deleted");
    4598              :             break;
    4599              : 
    4600            0 :         default:
    4601            0 :             elog(ERROR, "unrecognized heap_update status: %u", result);
    4602              :             break;
    4603              :     }
    4604       133849 : }
    4605              : 
    4606              : 
    4607              : /*
    4608              :  * Return the MultiXactStatus corresponding to the given tuple lock mode.
    4609              :  */
    4610              : static MultiXactStatus
    4611       115506 : get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
    4612              : {
    4613              :     int         retval;
    4614              : 
    4615       115506 :     if (is_update)
    4616          216 :         retval = tupleLockExtraInfo[mode].updstatus;
    4617              :     else
    4618       115290 :         retval = tupleLockExtraInfo[mode].lockstatus;
    4619              : 
    4620       115506 :     if (retval == -1)
    4621            0 :         elog(ERROR, "invalid lock tuple mode %d/%s", mode,
    4622              :              is_update ? "true" : "false");
    4623              : 
    4624       115506 :     return (MultiXactStatus) retval;
    4625              : }
    4626              : 
    4627              : /*
    4628              :  *  heap_lock_tuple - lock a tuple in shared or exclusive mode
    4629              :  *
    4630              :  * Note that this acquires a buffer pin, which the caller must release.
    4631              :  *
    4632              :  * Input parameters:
    4633              :  *  relation: relation containing tuple (caller must hold suitable lock)
    4634              :  *  cid: current command ID (used for visibility test, and stored into
    4635              :  *      tuple's cmax if lock is successful)
    4636              :  *  mode: indicates if shared or exclusive tuple lock is desired
    4637              :  *  wait_policy: what to do if tuple lock is not available
    4638              :  *  follow_updates: if true, follow the update chain to also lock descendant
    4639              :  *      tuples.
    4640              :  *
    4641              :  * Output parameters:
    4642              :  *  *tuple: all fields filled in
    4643              :  *  *buffer: set to buffer holding tuple (pinned but not locked at exit)
    4644              :  *  *tmfd: filled in failure cases (see below)
    4645              :  *
    4646              :  * Function results are the same as the ones for table_tuple_lock().
    4647              :  *
    4648              :  * In the failure cases other than TM_Invisible, the routine fills
    4649              :  * *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
    4650              :  * if necessary), and t_cmax (the last only for TM_SelfModified,
    4651              :  * since we cannot obtain cmax from a combo CID generated by another
    4652              :  * transaction).
    4653              :  * See comments for struct TM_FailureData for additional info.
    4654              :  *
    4655              :  * See README.tuplock for a thorough explanation of this mechanism.
    4656              :  */
    4657              : TM_Result
    4658       559748 : heap_lock_tuple(Relation relation, HeapTuple tuple,
    4659              :                 CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
    4660              :                 bool follow_updates,
    4661              :                 Buffer *buffer, TM_FailureData *tmfd)
    4662              : {
    4663              :     TM_Result   result;
    4664       559748 :     ItemPointer tid = &(tuple->t_self);
    4665              :     ItemId      lp;
    4666              :     Page        page;
    4667       559748 :     Buffer      vmbuffer = InvalidBuffer;
    4668              :     BlockNumber block;
    4669              :     TransactionId xid,
    4670              :                 xmax;
    4671              :     uint16      old_infomask,
    4672              :                 new_infomask,
    4673              :                 new_infomask2;
    4674       559748 :     bool        first_time = true;
    4675       559748 :     bool        skip_tuple_lock = false;
    4676       559748 :     bool        have_tuple_lock = false;
    4677       559748 :     bool        cleared_all_frozen = false;
    4678              : 
    4679       559748 :     *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
    4680       559748 :     block = ItemPointerGetBlockNumber(tid);
    4681              : 
    4682              :     /*
    4683              :      * Before locking the buffer, pin the visibility map page if it appears to
    4684              :      * be necessary.  Since we haven't got the lock yet, someone else might be
    4685              :      * in the middle of changing this, so we'll need to recheck after we have
    4686              :      * the lock.
    4687              :      */
    4688       559748 :     if (PageIsAllVisible(BufferGetPage(*buffer)))
    4689       401796 :         visibilitymap_pin(relation, block, &vmbuffer);
    4690              : 
    4691       559748 :     LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4692              : 
    4693       559748 :     page = BufferGetPage(*buffer);
    4694       559748 :     lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
    4695              :     Assert(ItemIdIsNormal(lp));
    4696              : 
    4697       559748 :     tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
    4698       559748 :     tuple->t_len = ItemIdGetLength(lp);
    4699       559748 :     tuple->t_tableOid = RelationGetRelid(relation);
    4700              : 
    4701           17 : l3:
    4702       559765 :     result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
    4703              : 
    4704       559765 :     if (result == TM_Invisible)
    4705              :     {
    4706              :         /*
    4707              :          * This is possible, but only when locking a tuple for ON CONFLICT DO
    4708              :          * SELECT/UPDATE.  We return this value here rather than throwing an
    4709              :          * error in order to give that case the opportunity to throw a more
    4710              :          * specific error.
    4711              :          */
    4712           28 :         result = TM_Invisible;
    4713           28 :         goto out_locked;
    4714              :     }
    4715       559737 :     else if (result == TM_BeingModified ||
    4716        78551 :              result == TM_Updated ||
    4717              :              result == TM_Deleted)
    4718              :     {
    4719              :         TransactionId xwait;
    4720              :         uint16      infomask;
    4721              :         uint16      infomask2;
    4722              :         bool        require_sleep;
    4723              :         ItemPointerData t_ctid;
    4724              : 
    4725              :         /* must copy state data before unlocking buffer */
    4726       481188 :         xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
    4727       481188 :         infomask = tuple->t_data->t_infomask;
    4728       481188 :         infomask2 = tuple->t_data->t_infomask2;
    4729       481188 :         ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
    4730              : 
    4731       481188 :         LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
    4732              : 
    4733              :         /*
    4734              :          * If any subtransaction of the current top transaction already holds
    4735              :          * a lock as strong as or stronger than what we're requesting, we
    4736              :          * effectively hold the desired lock already.  We *must* succeed
    4737              :          * without trying to take the tuple lock, else we will deadlock
    4738              :          * against anyone wanting to acquire a stronger lock.
    4739              :          *
    4740              :          * Note we only do this the first time we loop on the HTSU result;
    4741              :          * there is no point in testing in subsequent passes, because
    4742              :          * evidently our own transaction cannot have acquired a new lock after
    4743              :          * the first time we checked.
    4744              :          */
    4745       481188 :         if (first_time)
    4746              :         {
    4747       481176 :             first_time = false;
    4748              : 
    4749       481176 :             if (infomask & HEAP_XMAX_IS_MULTI)
    4750              :             {
    4751              :                 int         i;
    4752              :                 int         nmembers;
    4753              :                 MultiXactMember *members;
    4754              : 
    4755              :                 /*
    4756              :                  * We don't need to allow old multixacts here; if that had
    4757              :                  * been the case, HeapTupleSatisfiesUpdate would have returned
    4758              :                  * MayBeUpdated and we wouldn't be here.
    4759              :                  */
    4760              :                 nmembers =
    4761        73295 :                     GetMultiXactIdMembers(xwait, &members, false,
    4762        73295 :                                           HEAP_XMAX_IS_LOCKED_ONLY(infomask));
    4763              : 
    4764      1422658 :                 for (i = 0; i < nmembers; i++)
    4765              :                 {
    4766              :                     /* only consider members of our own transaction */
    4767      1349377 :                     if (!TransactionIdIsCurrentTransactionId(members[i].xid))
    4768      1349327 :                         continue;
    4769              : 
    4770           50 :                     if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
    4771              :                     {
    4772           14 :                         pfree(members);
    4773           14 :                         result = TM_Ok;
    4774           14 :                         goto out_unlocked;
    4775              :                     }
    4776              :                     else
    4777              :                     {
    4778              :                         /*
    4779              :                          * Disable acquisition of the heavyweight tuple lock.
    4780              :                          * Otherwise, when promoting a weaker lock, we might
    4781              :                          * deadlock with another locker that has acquired the
    4782              :                          * heavyweight tuple lock and is waiting for our
    4783              :                          * transaction to finish.
    4784              :                          *
    4785              :                          * Note that in this case we still need to wait for
    4786              :                          * the multixact if required, to avoid acquiring
    4787              :                          * conflicting locks.
    4788              :                          */
    4789           36 :                         skip_tuple_lock = true;
    4790              :                     }
    4791              :                 }
    4792              : 
    4793        73281 :                 if (members)
    4794        73281 :                     pfree(members);
    4795              :             }
    4796       407881 :             else if (TransactionIdIsCurrentTransactionId(xwait))
    4797              :             {
    4798       406487 :                 switch (mode)
    4799              :                 {
    4800       400052 :                     case LockTupleKeyShare:
    4801              :                         Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
    4802              :                                HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
    4803              :                                HEAP_XMAX_IS_EXCL_LOCKED(infomask));
    4804       400052 :                         result = TM_Ok;
    4805       400052 :                         goto out_unlocked;
    4806           38 :                     case LockTupleShare:
    4807           44 :                         if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
    4808            6 :                             HEAP_XMAX_IS_EXCL_LOCKED(infomask))
    4809              :                         {
    4810           32 :                             result = TM_Ok;
    4811           32 :                             goto out_unlocked;
    4812              :                         }
    4813            6 :                         break;
    4814           82 :                     case LockTupleNoKeyExclusive:
    4815           82 :                         if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
    4816              :                         {
    4817           70 :                             result = TM_Ok;
    4818           70 :                             goto out_unlocked;
    4819              :                         }
    4820           12 :                         break;
    4821         6315 :                     case LockTupleExclusive:
    4822         6315 :                         if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
    4823         1273 :                             infomask2 & HEAP_KEYS_UPDATED)
    4824              :                         {
    4825         1245 :                             result = TM_Ok;
    4826         1245 :                             goto out_unlocked;
    4827              :                         }
    4828         5070 :                         break;
    4829              :                 }
    4830              :             }
    4831              :         }
    4832              : 
    4833              :         /*
    4834              :          * Initially assume that we will have to wait for the locking
    4835              :          * transaction(s) to finish.  We check various cases below in which
    4836              :          * this can be turned off.
    4837              :          */
    4838        79775 :         require_sleep = true;
    4839        79775 :         if (mode == LockTupleKeyShare)
    4840              :         {
    4841              :             /*
    4842              :              * If we're requesting KeyShare, and there's no update present, we
    4843              :              * don't need to wait.  Even if there is an update, we can still
    4844              :              * continue if the key hasn't been modified.
    4845              :              *
    4846              :              * However, if there are updates, we need to walk the update chain
    4847              :              * to mark future versions of the row as locked, too.  That way,
    4848              :              * if somebody deletes that future version, we're protected
    4849              :              * against the key going away.  This locking of future versions
    4850              :              * could block momentarily, if a concurrent transaction is
    4851              :              * deleting a key; or it could return a value to the effect that
    4852              :              * the transaction deleting the key has already committed.  So we
    4853              :              * do this before re-locking the buffer; otherwise this would be
    4854              :              * prone to deadlocks.
    4855              :              *
    4856              :              * Note that the TID we're locking was grabbed before we unlocked
    4857              :              * the buffer.  For it to change while we're not looking, the
    4858              :              * other properties we're testing for below after re-locking the
    4859              :              * buffer would also change, in which case we would restart this
    4860              :              * loop above.
    4861              :              */
    4862        73888 :             if (!(infomask2 & HEAP_KEYS_UPDATED))
    4863              :             {
    4864              :                 bool        updated;
    4865              : 
    4866        73845 :                 updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
    4867              : 
    4868              :                 /*
    4869              :                  * If there are updates, follow the update chain; bail out if
    4870              :                  * that cannot be done.
    4871              :                  */
    4872        73845 :                 if (follow_updates && updated &&
    4873         2169 :                     !ItemPointerEquals(&tuple->t_self, &t_ctid))
    4874              :                 {
    4875              :                     TM_Result   res;
    4876              : 
    4877         2169 :                     res = heap_lock_updated_tuple(relation,
    4878              :                                                   infomask, xwait, &t_ctid,
    4879              :                                                   GetCurrentTransactionId(),
    4880              :                                                   mode);
    4881         2169 :                     if (res != TM_Ok)
    4882              :                     {
    4883            6 :                         result = res;
    4884              :                         /* recovery code expects to have buffer lock held */
    4885            6 :                         LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4886          200 :                         goto failed;
    4887              :                     }
    4888              :                 }
    4889              : 
    4890        73839 :                 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4891              : 
    4892              :                 /*
    4893              :                  * Make sure it's still an appropriate lock, else start over.
    4894              :                  * Also, if it wasn't updated before we released the lock, but
    4895              :                  * is updated now, we start over too; the reason is that we
    4896              :                  * now need to follow the update chain to lock the new
    4897              :                  * versions.
    4898              :                  */
    4899        73839 :                 if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
    4900         2151 :                     ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
    4901         2151 :                      !updated))
    4902           17 :                     goto l3;
    4903              : 
    4904              :                 /* Things look okay, so we can skip sleeping */
    4905        73839 :                 require_sleep = false;
    4906              : 
    4907              :                 /*
    4908              :                  * Note we allow Xmax to change here; other updaters/lockers
    4909              :                  * could have modified it before we grabbed the buffer lock.
    4910              :                  * However, this is not a problem, because with the recheck we
    4911              :                  * just did we ensure that they still don't conflict with the
    4912              :                  * lock we want.
    4913              :                  */
    4914              :             }
    4915              :         }
    4916         5887 :         else if (mode == LockTupleShare)
    4917              :         {
    4918              :             /*
    4919              :              * If we're requesting Share, we can similarly avoid sleeping if
    4920              :              * there's no update and no exclusive lock present.
    4921              :              */
    4922          477 :             if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
    4923          477 :                 !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
    4924              :             {
    4925          471 :                 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4926              : 
    4927              :                 /*
    4928              :                  * Make sure it's still an appropriate lock, else start over.
    4929              :                  * See above about allowing xmax to change.
    4930              :                  */
    4931          942 :                 if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
    4932          471 :                     HEAP_XMAX_IS_EXCL_LOCKED(tuple->t_data->t_infomask))
    4933            0 :                     goto l3;
    4934          471 :                 require_sleep = false;
    4935              :             }
    4936              :         }
    4937         5410 :         else if (mode == LockTupleNoKeyExclusive)
    4938              :         {
    4939              :             /*
    4940              :              * If we're requesting NoKeyExclusive, we might also be able to
     4941              :              * avoid sleeping; just ensure that there is no conflicting lock
    4942              :              * already acquired.
    4943              :              */
    4944          173 :             if (infomask & HEAP_XMAX_IS_MULTI)
    4945              :             {
    4946           26 :                 if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
    4947              :                                              mode, NULL))
    4948              :                 {
    4949              :                     /*
    4950              :                      * No conflict, but if the xmax changed under us in the
    4951              :                      * meantime, start over.
    4952              :                      */
    4953           13 :                     LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4954           26 :                     if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
    4955           13 :                         !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
    4956              :                                              xwait))
    4957            0 :                         goto l3;
    4958              : 
    4959              :                     /* otherwise, we're good */
    4960           13 :                     require_sleep = false;
    4961              :                 }
    4962              :             }
    4963          147 :             else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
    4964              :             {
    4965           18 :                 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4966              : 
    4967              :                 /* if the xmax changed in the meantime, start over */
    4968           36 :                 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
    4969           18 :                     !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
    4970              :                                          xwait))
    4971            0 :                     goto l3;
    4972              :                 /* otherwise, we're good */
    4973           18 :                 require_sleep = false;
    4974              :             }
    4975              :         }
    4976              : 
    4977              :         /*
    4978              :          * As a check independent from those above, we can also avoid sleeping
    4979              :          * if the current transaction is the sole locker of the tuple.  Note
    4980              :          * that the strength of the lock already held is irrelevant; this is
    4981              :          * not about recording the lock in Xmax (which will be done regardless
    4982              :          * of this optimization, below).  Also, note that the cases where we
    4983              :          * hold a lock stronger than we are requesting are already handled
    4984              :          * above by not doing anything.
    4985              :          *
    4986              :          * Note we only deal with the non-multixact case here; MultiXactIdWait
    4987              :          * is well equipped to deal with this situation on its own.
    4988              :          */
    4989        85153 :         if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
    4990         5384 :             TransactionIdIsCurrentTransactionId(xwait))
    4991              :         {
    4992              :             /* ... but if the xmax changed in the meantime, start over */
    4993         5070 :             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4994        10140 :             if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
    4995         5070 :                 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
    4996              :                                      xwait))
    4997            0 :                 goto l3;
    4998              :             Assert(HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask));
    4999         5070 :             require_sleep = false;
    5000              :         }
    5001              : 
    5002              :         /*
    5003              :          * Time to sleep on the other transaction/multixact, if necessary.
    5004              :          *
    5005              :          * If the other transaction is an update/delete that's already
    5006              :          * committed, then sleeping cannot possibly do any good: if we're
    5007              :          * required to sleep, get out to raise an error instead.
    5008              :          *
    5009              :          * By here, we either have already acquired the buffer exclusive lock,
    5010              :          * or we must wait for the locking transaction or multixact; so below
    5011              :          * we ensure that we grab buffer lock after the sleep.
    5012              :          */
    5013        79769 :         if (require_sleep && (result == TM_Updated || result == TM_Deleted))
    5014              :         {
    5015          156 :             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    5016          156 :             goto failed;
    5017              :         }
    5018        79613 :         else if (require_sleep)
    5019              :         {
    5020              :             /*
    5021              :              * Acquire tuple lock to establish our priority for the tuple, or
    5022              :              * die trying.  LockTuple will release us when we are next-in-line
    5023              :              * for the tuple.  We must do this even if we are share-locking,
    5024              :              * but not if we already have a weaker lock on the tuple.
    5025              :              *
    5026              :              * If we are forced to "start over" below, we keep the tuple lock;
    5027              :              * this arranges that we stay at the head of the line while
    5028              :              * rechecking tuple state.
    5029              :              */
    5030          202 :             if (!skip_tuple_lock &&
    5031          185 :                 !heap_acquire_tuplock(relation, tid, mode, wait_policy,
    5032              :                                       &have_tuple_lock))
    5033              :             {
    5034              :                 /*
    5035              :                  * This can only happen if wait_policy is Skip and the lock
    5036              :                  * couldn't be obtained.
    5037              :                  */
    5038            1 :                 result = TM_WouldBlock;
    5039              :                 /* recovery code expects to have buffer lock held */
    5040            1 :                 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    5041            1 :                 goto failed;
    5042              :             }
    5043              : 
    5044          200 :             if (infomask & HEAP_XMAX_IS_MULTI)
    5045              :             {
    5046           43 :                 MultiXactStatus status = get_mxact_status_for_lock(mode, false);
    5047              : 
    5048              :                 /* We only ever lock tuples, never update them */
    5049           43 :                 if (status >= MultiXactStatusNoKeyUpdate)
    5050            0 :                     elog(ERROR, "invalid lock mode in heap_lock_tuple");
    5051              : 
    5052              :                 /* wait for multixact to end, or die trying  */
    5053           43 :                 switch (wait_policy)
    5054              :                 {
    5055           37 :                     case LockWaitBlock:
    5056           37 :                         MultiXactIdWait((MultiXactId) xwait, status, infomask,
    5057           37 :                                         relation, &tuple->t_self, XLTW_Lock, NULL);
    5058           37 :                         break;
    5059            2 :                     case LockWaitSkip:
    5060            2 :                         if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
    5061              :                                                         status, infomask, relation,
    5062              :                                                         NULL, false))
    5063              :                         {
    5064            2 :                             result = TM_WouldBlock;
    5065              :                             /* recovery code expects to have buffer lock held */
    5066            2 :                             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    5067            2 :                             goto failed;
    5068              :                         }
    5069            0 :                         break;
    5070            4 :                     case LockWaitError:
    5071            4 :                         if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
    5072              :                                                         status, infomask, relation,
    5073              :                                                         NULL, log_lock_failures))
    5074            4 :                             ereport(ERROR,
    5075              :                                     (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
    5076              :                                      errmsg("could not obtain lock on row in relation \"%s\"",
    5077              :                                             RelationGetRelationName(relation))));
    5078              : 
    5079            0 :                         break;
    5080              :                 }
    5081              : 
    5082              :                 /*
    5083              :                  * Of course, the multixact might not be done here: if we're
    5084              :                  * requesting a light lock mode, other transactions with light
    5085              :                  * locks could still be alive, as well as locks owned by our
    5086              :                  * own xact or other subxacts of this backend.  We need to
    5087              :                  * preserve the surviving MultiXact members.  Note that it
    5088              :                  * isn't absolutely necessary in the latter case, but doing so
    5089              :                  * is simpler.
    5090              :                  */
    5091              :             }
    5092              :             else
    5093              :             {
    5094              :                 /* wait for regular transaction to end, or die trying */
    5095          157 :                 switch (wait_policy)
    5096              :                 {
    5097          116 :                     case LockWaitBlock:
    5098          116 :                         XactLockTableWait(xwait, relation, &tuple->t_self,
    5099              :                                           XLTW_Lock);
    5100          116 :                         break;
    5101           33 :                     case LockWaitSkip:
    5102           33 :                         if (!ConditionalXactLockTableWait(xwait, false))
    5103              :                         {
    5104           33 :                             result = TM_WouldBlock;
    5105              :                             /* recovery code expects to have buffer lock held */
    5106           33 :                             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    5107           33 :                             goto failed;
    5108              :                         }
    5109            0 :                         break;
    5110            8 :                     case LockWaitError:
    5111            8 :                         if (!ConditionalXactLockTableWait(xwait, log_lock_failures))
    5112            8 :                             ereport(ERROR,
    5113              :                                     (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
    5114              :                                      errmsg("could not obtain lock on row in relation \"%s\"",
    5115              :                                             RelationGetRelationName(relation))));
    5116            0 :                         break;
    5117              :                 }
    5118              :             }
    5119              : 
    5120              :             /* if there are updates, follow the update chain */
    5121          153 :             if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
    5122           59 :                 !ItemPointerEquals(&tuple->t_self, &t_ctid))
    5123              :             {
    5124              :                 TM_Result   res;
    5125              : 
    5126           46 :                 res = heap_lock_updated_tuple(relation,
    5127              :                                               infomask, xwait, &t_ctid,
    5128              :                                               GetCurrentTransactionId(),
    5129              :                                               mode);
    5130           46 :                 if (res != TM_Ok)
    5131              :                 {
    5132            2 :                     result = res;
    5133              :                     /* recovery code expects to have buffer lock held */
    5134            2 :                     LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    5135            2 :                     goto failed;
    5136              :                 }
    5137              :             }
    5138              : 
    5139          151 :             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    5140              : 
    5141              :             /*
    5142              :              * xwait is done, but if xwait had just locked the tuple then some
    5143              :              * other xact could update this tuple before we get to this point.
    5144              :              * Check for xmax change, and start over if so.
    5145              :              */
    5146          287 :             if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
    5147          136 :                 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
    5148              :                                      xwait))
    5149           17 :                 goto l3;
    5150              : 
    5151          134 :             if (!(infomask & HEAP_XMAX_IS_MULTI))
    5152              :             {
    5153              :                 /*
    5154              :                  * Otherwise check if it committed or aborted.  Note we cannot
    5155              :                  * be here if the tuple was only locked by somebody who didn't
    5156              :                  * conflict with us; that would have been handled above.  So
    5157              :                  * that transaction must necessarily be gone by now.  But
    5158              :                  * don't check for this in the multixact case, because some
    5159              :                  * locker transactions might still be running.
    5160              :                  */
    5161          101 :                 UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
    5162              :             }
    5163              :         }
    5164              : 
    5165              :         /* By here, we're certain that we hold buffer exclusive lock again */
    5166              : 
    5167              :         /*
    5168              :          * We may lock if previous xmax aborted, or if it committed but only
    5169              :          * locked the tuple without updating it; or if we didn't have to wait
    5170              :          * at all for whatever reason.
    5171              :          */
    5172        79545 :         if (!require_sleep ||
    5173          232 :             (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
    5174          180 :             HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
    5175           82 :             HeapTupleHeaderIsOnlyLocked(tuple->t_data))
    5176        79470 :             result = TM_Ok;
    5177           75 :         else if (!ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
    5178           57 :             result = TM_Updated;
    5179              :         else
    5180           18 :             result = TM_Deleted;
    5181              :     }
    5182              : 
    5183        78549 : failed:
    5184       158294 :     if (result != TM_Ok)
    5185              :     {
    5186              :         Assert(result == TM_SelfModified || result == TM_Updated ||
    5187              :                result == TM_Deleted || result == TM_WouldBlock);
    5188              : 
    5189              :         /*
    5190              :          * When locking a tuple under LockWaitSkip semantics and we fail with
    5191              :          * TM_WouldBlock above, it's possible for concurrent transactions to
    5192              :          * release the lock and set HEAP_XMAX_INVALID in the meantime.  So
    5193              :          * this assert is slightly different from the equivalent one in
    5194              :          * heap_delete and heap_update.
    5195              :          */
    5196              :         Assert((result == TM_WouldBlock) ||
    5197              :                !(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
    5198              :         Assert(result != TM_Updated ||
    5199              :                !ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid));
    5200          283 :         tmfd->ctid = tuple->t_data->t_ctid;
    5201          283 :         tmfd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
    5202          283 :         if (result == TM_SelfModified)
    5203            8 :             tmfd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
    5204              :         else
    5205          275 :             tmfd->cmax = InvalidCommandId;
    5206          283 :         goto out_locked;
    5207              :     }
    5208              : 
    5209              :     /*
    5210              :      * If we didn't pin the visibility map page and the page has become all
    5211              :      * visible while we were busy locking the buffer, or during some
    5212              :      * subsequent window during which we had it unlocked, we'll have to unlock
    5213              :      * and re-lock, to avoid holding the buffer lock across I/O.  That's a bit
    5214              :      * unfortunate, especially since we'll now have to recheck whether the
    5215              :      * tuple has been locked or updated under us, but hopefully it won't
    5216              :      * happen very often.
    5217              :      */
    5218       158011 :     if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
    5219              :     {
    5220            0 :         LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
    5221            0 :         visibilitymap_pin(relation, block, &vmbuffer);
    5222            0 :         LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    5223            0 :         goto l3;
    5224              :     }
    5225              : 
    5226       158011 :     xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
    5227       158011 :     old_infomask = tuple->t_data->t_infomask;
    5228              : 
    5229              :     /*
    5230              :      * If this is the first possibly-multixact-able operation in the current
    5231              :      * transaction, set my per-backend OldestMemberMXactId setting. We can be
    5232              :      * certain that the transaction will never become a member of any older
    5233              :      * MultiXactIds than that.  (We have to do this even if we end up just
    5234              :      * using our own TransactionId below, since some other backend could
    5235              :      * incorporate our XID into a MultiXact immediately afterwards.)
    5236              :      */
    5237       158011 :     MultiXactIdSetOldestMember();
    5238              : 
    5239              :     /*
    5240              :      * Compute the new xmax and infomask to store into the tuple.  Note we do
    5241              :      * not modify the tuple just yet, because that would leave it in the wrong
    5242              :      * state if multixact.c elogs.
    5243              :      */
    5244       158011 :     compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
    5245              :                               GetCurrentTransactionId(), mode, false,
    5246              :                               &xid, &new_infomask, &new_infomask2);
    5247              : 
    5248       158011 :     START_CRIT_SECTION();
    5249              : 
    5250              :     /*
    5251              :      * Store transaction information of xact locking the tuple.
    5252              :      *
    5253              :      * Note: Cmax is meaningless in this context, so don't set it; this avoids
    5254              :      * possibly generating a useless combo CID.  Moreover, if we're locking a
    5255              :      * previously updated tuple, it's important to preserve the Cmax.
    5256              :      *
    5257              :      * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
    5258              :      * we would break the HOT chain.
    5259              :      */
    5260       158011 :     tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
    5261       158011 :     tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    5262       158011 :     tuple->t_data->t_infomask |= new_infomask;
    5263       158011 :     tuple->t_data->t_infomask2 |= new_infomask2;
    5264       158011 :     if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
    5265       155864 :         HeapTupleHeaderClearHotUpdated(tuple->t_data);
    5266       158011 :     HeapTupleHeaderSetXmax(tuple->t_data, xid);
    5267              : 
    5268              :     /*
    5269              :      * Make sure there is no forward chain link in t_ctid.  Note that in the
    5270              :      * cases where the tuple has been updated, we must not overwrite t_ctid,
    5271              :      * because it was set by the updater.  Moreover, if the tuple has been
    5272              :      * updated, we need to follow the update chain to lock the new versions of
    5273              :      * the tuple as well.
    5274              :      */
    5275       158011 :     if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
    5276       155864 :         tuple->t_data->t_ctid = *tid;
    5277              : 
    5278              :     /* Clear only the all-frozen bit on visibility map if needed */
    5279       160013 :     if (PageIsAllVisible(page) &&
    5280         2002 :         visibilitymap_clear(relation, block, vmbuffer,
    5281              :                             VISIBILITYMAP_ALL_FROZEN))
    5282           16 :         cleared_all_frozen = true;
    5283              : 
    5284              : 
    5285       158011 :     MarkBufferDirty(*buffer);
    5286              : 
    5287              :     /*
    5288              :      * XLOG stuff.  You might think that we don't need an XLOG record because
    5289              :      * there is no state change worth restoring after a crash.  You would be
    5290              :      * wrong however: we have just written either a TransactionId or a
    5291              :      * MultiXactId that may never have been seen on disk before, and we need
    5292              :      * to make sure that there are XLOG entries covering those ID numbers.
    5293              :      * Else the same IDs might be re-used after a crash, which would be
    5294              :      * disastrous if this page made it to disk before the crash.  Essentially
    5295              :      * we have to enforce the WAL log-before-data rule even in this case.
    5296              :      * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
    5297              :      * entries for everything anyway.)
    5298              :      */
    5299       158011 :     if (RelationNeedsWAL(relation))
    5300              :     {
    5301              :         xl_heap_lock xlrec;
    5302              :         XLogRecPtr  recptr;
    5303              : 
    5304       157582 :         XLogBeginInsert();
    5305       157582 :         XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD);
    5306              : 
    5307       157582 :         xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
    5308       157582 :         xlrec.xmax = xid;
    5309       315164 :         xlrec.infobits_set = compute_infobits(new_infomask,
    5310       157582 :                                               tuple->t_data->t_infomask2);
    5311       157582 :         xlrec.flags = cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
    5312       157582 :         XLogRegisterData(&xlrec, SizeOfHeapLock);
    5313              : 
    5314              :         /* we don't decode row locks atm, so no need to log the origin */
    5315              : 
    5316       157582 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
    5317              : 
    5318       157582 :         PageSetLSN(page, recptr);
    5319              :     }
    5320              : 
    5321       158011 :     END_CRIT_SECTION();
    5322              : 
    5323       158011 :     result = TM_Ok;
    5324              : 
    5325       158322 : out_locked:
    5326       158322 :     LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
    5327              : 
    5328       559735 : out_unlocked:
    5329       559735 :     if (BufferIsValid(vmbuffer))
    5330       401796 :         ReleaseBuffer(vmbuffer);
    5331              : 
    5332              :     /*
    5333              :      * Don't update the visibility map here. Locking a tuple doesn't change
    5334              :      * visibility info.
    5335              :      */
    5336              : 
    5337              :     /*
    5338              :      * Now that we have successfully marked the tuple as locked, we can
    5339              :      * release the lmgr tuple lock, if we had it.
    5340              :      */
    5341       559735 :     if (have_tuple_lock)
    5342          166 :         UnlockTupleTuplock(relation, tid, mode);
    5343              : 
    5344       559735 :     return result;
    5345              : }
    5346              : 
    5347              : /*
    5348              :  * Acquire heavyweight lock on the given tuple, in preparation for acquiring
    5349              :  * its normal, Xmax-based tuple lock.
    5350              :  *
    5351              :  * have_tuple_lock is an input and output parameter: on input, it indicates
    5352              :  * whether the lock has previously been acquired (and this function does
    5353              :  * nothing in that case).  If this function returns success, have_tuple_lock
    5354              :  * has been flipped to true.
    5355              :  *
    5356              :  * Returns false if it was unable to obtain the lock; this can only happen if
    5357              :  * wait_policy is Skip.
    5358              :  */
    5359              : static bool
    5360          325 : heap_acquire_tuplock(Relation relation, const ItemPointerData *tid, LockTupleMode mode,
    5361              :                      LockWaitPolicy wait_policy, bool *have_tuple_lock)
    5362              : {
    5363          325 :     if (*have_tuple_lock)
    5364            9 :         return true;
    5365              : 
    5366          316 :     switch (wait_policy)
    5367              :     {
    5368          271 :         case LockWaitBlock:
    5369          271 :             LockTupleTuplock(relation, tid, mode);
    5370          271 :             break;
    5371              : 
    5372           34 :         case LockWaitSkip:
    5373           34 :             if (!ConditionalLockTupleTuplock(relation, tid, mode, false))
    5374            1 :                 return false;
    5375           33 :             break;
    5376              : 
    5377           11 :         case LockWaitError:
    5378           11 :             if (!ConditionalLockTupleTuplock(relation, tid, mode, log_lock_failures))
    5379            1 :                 ereport(ERROR,
    5380              :                         (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
    5381              :                          errmsg("could not obtain lock on row in relation \"%s\"",
    5382              :                                 RelationGetRelationName(relation))));
    5383           10 :             break;
    5384              :     }
    5385          314 :     *have_tuple_lock = true;
    5386              : 
    5387          314 :     return true;
    5388              : }
    5389              : 
    5390              : /*
    5391              :  * Given an original set of Xmax and infomask, and a transaction (identified by
    5392              :  * add_to_xmax) acquiring a new lock of some mode, compute the new Xmax and
    5393              :  * corresponding infomasks to use on the tuple.
    5394              :  *
    5395              :  * Note that this might have side effects such as creating a new MultiXactId.
    5396              :  *
    5397              :  * Most callers will have called HeapTupleSatisfiesUpdate before this function;
    5398              :  * that will have set the HEAP_XMAX_INVALID bit if the xmax was a MultiXactId
    5399              :  * but it was not running anymore. There is a race condition, which is that the
    5400              :  * MultiXactId may have finished since then, but that uncommon case is handled
    5401              :  * either here, or within MultiXactIdExpand.
    5402              :  *
    5403              :  * There is a similar race condition possible when the old xmax was a regular
    5404              :  * TransactionId.  We test TransactionIdIsInProgress again just to narrow the
    5405              :  * window, but it's still possible to end up creating an unnecessary
    5406              :  * MultiXactId.  Fortunately this is harmless.
    5407              :  */
    5408              : static void
    5409      2498525 : compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
    5410              :                           uint16 old_infomask2, TransactionId add_to_xmax,
    5411              :                           LockTupleMode mode, bool is_update,
    5412              :                           TransactionId *result_xmax, uint16 *result_infomask,
    5413              :                           uint16 *result_infomask2)
    5414              : {
    5415              :     TransactionId new_xmax;
    5416              :     uint16      new_infomask,
    5417              :                 new_infomask2;
    5418              : 
    5419              :     Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
    5420              : 
    5421       104353 : l5:
    5422      2602878 :     new_infomask = 0;
    5423      2602878 :     new_infomask2 = 0;
    5424      2602878 :     if (old_infomask & HEAP_XMAX_INVALID)
    5425              :     {
    5426              :         /*
    5427              :          * No previous locker; we just insert our own TransactionId.
    5428              :          *
    5429              :          * Note that it's critical that this case be the first one checked,
    5430              :          * because there are several blocks below that come back to this one
    5431              :          * to implement certain optimizations; old_infomask might contain
    5432              :          * other dirty bits in those cases, but we don't really care.
    5433              :          */
    5434      2421836 :         if (is_update)
    5435              :         {
    5436      2180797 :             new_xmax = add_to_xmax;
    5437      2180797 :             if (mode == LockTupleExclusive)
    5438      1882233 :                 new_infomask2 |= HEAP_KEYS_UPDATED;
    5439              :         }
    5440              :         else
    5441              :         {
    5442       241039 :             new_infomask |= HEAP_XMAX_LOCK_ONLY;
    5443       241039 :             switch (mode)
    5444              :             {
    5445         3446 :                 case LockTupleKeyShare:
    5446         3446 :                     new_xmax = add_to_xmax;
    5447         3446 :                     new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
    5448         3446 :                     break;
    5449          811 :                 case LockTupleShare:
    5450          811 :                     new_xmax = add_to_xmax;
    5451          811 :                     new_infomask |= HEAP_XMAX_SHR_LOCK;
    5452          811 :                     break;
    5453       140893 :                 case LockTupleNoKeyExclusive:
    5454       140893 :                     new_xmax = add_to_xmax;
    5455       140893 :                     new_infomask |= HEAP_XMAX_EXCL_LOCK;
    5456       140893 :                     break;
    5457        95889 :                 case LockTupleExclusive:
    5458        95889 :                     new_xmax = add_to_xmax;
    5459        95889 :                     new_infomask |= HEAP_XMAX_EXCL_LOCK;
    5460        95889 :                     new_infomask2 |= HEAP_KEYS_UPDATED;
    5461        95889 :                     break;
    5462            0 :                 default:
    5463            0 :                     new_xmax = InvalidTransactionId;    /* silence compiler */
    5464            0 :                     elog(ERROR, "invalid lock mode");
    5465              :             }
    5466              :         }
    5467              :     }
    5468       181042 :     else if (old_infomask & HEAP_XMAX_IS_MULTI)
    5469              :     {
    5470              :         MultiXactStatus new_status;
    5471              : 
    5472              :         /*
    5473              :          * Currently we don't allow XMAX_COMMITTED to be set for multis, so
    5474              :          * cross-check.
    5475              :          */
    5476              :         Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
    5477              : 
    5478              :         /*
    5479              :          * A multixact together with LOCK_ONLY set but neither lock bit set
    5480              :          * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
    5481              :          * anymore.  This check is critical for databases upgraded by
    5482              :          * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
    5483              :          * that such multis are never passed.
    5484              :          */
    5485        75558 :         if (HEAP_LOCKED_UPGRADED(old_infomask))
    5486              :         {
    5487            0 :             old_infomask &= ~HEAP_XMAX_IS_MULTI;
    5488            0 :             old_infomask |= HEAP_XMAX_INVALID;
    5489            0 :             goto l5;
    5490              :         }
    5491              : 
    5492              :         /*
    5493              :          * If the XMAX is already a MultiXactId, then we need to expand it to
    5494              :          * include add_to_xmax; but if all the members were lockers and are
    5495              :          * all gone, we can do away with the IS_MULTI bit and just set
    5496              :          * add_to_xmax as the only locker/updater.  If all lockers are gone
    5497              :          * and we have an updater that aborted, we can also do without a
    5498              :          * multi.
    5499              :          *
    5500              :          * The cost of doing GetMultiXactIdMembers would be paid by
    5501              :          * MultiXactIdExpand if we weren't to do this, so this check is not
    5502              :          * incurring extra work anyhow.
    5503              :          */
    5504        75558 :         if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
    5505              :         {
    5506           24 :             if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
    5507            8 :                 !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
    5508              :                                                                 old_infomask)))
    5509              :             {
    5510              :                 /*
    5511              :                  * Reset these bits and restart; otherwise fall through to
    5512              :                  * create a new multi below.
    5513              :                  */
    5514           24 :                 old_infomask &= ~HEAP_XMAX_IS_MULTI;
    5515           24 :                 old_infomask |= HEAP_XMAX_INVALID;
    5516           24 :                 goto l5;
    5517              :             }
    5518              :         }
    5519              : 
    5520        75534 :         new_status = get_mxact_status_for_lock(mode, is_update);
    5521              : 
    5522        75534 :         new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
    5523              :                                      new_status);
    5524        75534 :         GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
    5525              :     }
    5526       105484 :     else if (old_infomask & HEAP_XMAX_COMMITTED)
    5527              :     {
    5528              :         /*
    5529              :          * It's a committed update, so we need to preserve him as updater of
    5530              :          * the tuple.
    5531              :          */
    5532              :         MultiXactStatus status;
    5533              :         MultiXactStatus new_status;
    5534              : 
    5535           13 :         if (old_infomask2 & HEAP_KEYS_UPDATED)
    5536            0 :             status = MultiXactStatusUpdate;
    5537              :         else
    5538           13 :             status = MultiXactStatusNoKeyUpdate;
    5539              : 
    5540           13 :         new_status = get_mxact_status_for_lock(mode, is_update);
    5541              : 
    5542              :         /*
    5543              :          * since it's not running, it's obviously impossible for the old
    5544              :          * updater to be identical to the current one, so we need not check
    5545              :          * for that case as we do in the block above.
    5546              :          */
    5547           13 :         new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
    5548           13 :         GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
    5549              :     }
    5550       105471 :     else if (TransactionIdIsInProgress(xmax))
    5551              :     {
    5552              :         /*
    5553              :          * If the XMAX is a valid, in-progress TransactionId, then we need to
    5554              :          * create a new MultiXactId that includes both the old locker or
    5555              :          * updater and our own TransactionId.
    5556              :          */
    5557              :         MultiXactStatus new_status;
    5558              :         MultiXactStatus old_status;
    5559              :         LockTupleMode old_mode;
    5560              : 
    5561       105462 :         if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
    5562              :         {
    5563       105436 :             if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
    5564         5717 :                 old_status = MultiXactStatusForKeyShare;
    5565        99719 :             else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
    5566          468 :                 old_status = MultiXactStatusForShare;
    5567        99251 :             else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
    5568              :             {
    5569        99251 :                 if (old_infomask2 & HEAP_KEYS_UPDATED)
    5570        92800 :                     old_status = MultiXactStatusForUpdate;
    5571              :                 else
    5572         6451 :                     old_status = MultiXactStatusForNoKeyUpdate;
    5573              :             }
    5574              :             else
    5575              :             {
    5576              :                 /*
    5577              :                  * LOCK_ONLY can be present alone only when a page has been
    5578              :                  * upgraded by pg_upgrade.  But in that case,
    5579              :                  * TransactionIdIsInProgress() should have returned false.  We
    5580              :                  * assume it's no longer locked in this case.
    5581              :                  */
    5582            0 :                 elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
    5583            0 :                 old_infomask |= HEAP_XMAX_INVALID;
    5584            0 :                 old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
    5585            0 :                 goto l5;
    5586              :             }
    5587              :         }
    5588              :         else
    5589              :         {
    5590              :             /* it's an update, but which kind? */
    5591           26 :             if (old_infomask2 & HEAP_KEYS_UPDATED)
    5592            0 :                 old_status = MultiXactStatusUpdate;
    5593              :             else
    5594           26 :                 old_status = MultiXactStatusNoKeyUpdate;
    5595              :         }
    5596              : 
    5597       105462 :         old_mode = TUPLOCK_from_mxstatus(old_status);
    5598              : 
    5599              :         /*
    5600              :          * If the lock to be acquired is for the same TransactionId as the
    5601              :          * existing lock, there's an optimization possible: consider only the
    5602              :          * strongest of both locks as the only one present, and restart.
    5603              :          */
    5604       105462 :         if (xmax == add_to_xmax)
    5605              :         {
    5606              :             /*
    5607              :              * Note that it's not possible for the original tuple to be
    5608              :              * updated: we wouldn't be here because the tuple would have been
    5609              :              * invisible and we wouldn't try to update it.  As a subtlety,
    5610              :              * this code can also run when traversing an update chain to lock
    5611              :              * future versions of a tuple.  But we wouldn't be here either,
    5612              :              * because the add_to_xmax would be different from the original
    5613              :              * updater.
    5614              :              */
    5615              :             Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
    5616              : 
    5617              :             /* acquire the strongest of both */
    5618       104321 :             if (mode < old_mode)
    5619        52198 :                 mode = old_mode;
    5620              :             /* mustn't touch is_update */
    5621              : 
    5622       104321 :             old_infomask |= HEAP_XMAX_INVALID;
    5623       104321 :             goto l5;
    5624              :         }
    5625              : 
    5626              :         /* otherwise, just fall back to creating a new multixact */
    5627         1141 :         new_status = get_mxact_status_for_lock(mode, is_update);
    5628         1141 :         new_xmax = MultiXactIdCreate(xmax, old_status,
    5629              :                                      add_to_xmax, new_status);
    5630         1141 :         GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
    5631              :     }
    5632           14 :     else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
    5633            5 :              TransactionIdDidCommit(xmax))
    5634            1 :     {
    5635              :         /*
    5636              :          * It's a committed update, so we gotta preserve him as updater of the
    5637              :          * tuple.
    5638              :          */
    5639              :         MultiXactStatus status;
    5640              :         MultiXactStatus new_status;
    5641              : 
    5642            1 :         if (old_infomask2 & HEAP_KEYS_UPDATED)
    5643            0 :             status = MultiXactStatusUpdate;
    5644              :         else
    5645            1 :             status = MultiXactStatusNoKeyUpdate;
    5646              : 
    5647            1 :         new_status = get_mxact_status_for_lock(mode, is_update);
    5648              : 
    5649              :         /*
    5650              :          * since it's not running, it's obviously impossible for the old
    5651              :          * updater to be identical to the current one, so we need not check
    5652              :          * for that case as we do in the block above.
    5653              :          */
    5654            1 :         new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
    5655            1 :         GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
    5656              :     }
    5657              :     else
    5658              :     {
    5659              :         /*
    5660              :          * Can get here iff the locking/updating transaction was running when
    5661              :          * the infomask was extracted from the tuple, but finished before
    5662              :          * TransactionIdIsInProgress got to run.  Deal with it as if there was
    5663              :          * no locker at all in the first place.
    5664              :          */
    5665            8 :         old_infomask |= HEAP_XMAX_INVALID;
    5666            8 :         goto l5;
    5667              :     }
    5668              : 
    5669      2498525 :     *result_infomask = new_infomask;
    5670      2498525 :     *result_infomask2 = new_infomask2;
    5671      2498525 :     *result_xmax = new_xmax;
    5672      2498525 : }
    5673              : 
    5674              : /*
    5675              :  * Subroutine for heap_lock_updated_tuple_rec.
    5676              :  *
    5677              :  * Given a hypothetical multixact status held by the transaction identified
    5678              :  * with the given xid, does the current transaction need to wait, fail, or can
    5679              :  * it continue if it wanted to acquire a lock of the given mode?  "needwait"
    5680              :  * is set to true if waiting is necessary; if it can continue, then TM_Ok is
    5681              :  * returned.  If the lock is already held by the current transaction, return
    5682              :  * TM_SelfModified.  In case of a conflict with another transaction, a
    5683              :  * different HeapTupleSatisfiesUpdate return code is returned.
    5684              :  *
    5685              :  * The held status is said to be hypothetical because it might correspond to a
    5686              :  * lock held by a single Xid, i.e. not a real MultiXactId; we express it this
    5687              :  * way for simplicity of API.
    5688              :  */
    5689              : static TM_Result
    5690        38774 : test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
    5691              :                            LockTupleMode mode, HeapTuple tup,
    5692              :                            bool *needwait)
    5693              : {
    5694              :     MultiXactStatus wantedstatus;
    5695              : 
    5696        38774 :     *needwait = false;
    5697        38774 :     wantedstatus = get_mxact_status_for_lock(mode, false);
    5698              : 
    5699              :     /*
    5700              :      * Note: we *must* check TransactionIdIsInProgress before
    5701              :      * TransactionIdDidAbort/Commit; see comment at top of heapam_visibility.c
    5702              :      * for an explanation.
    5703              :      */
    5704        38774 :     if (TransactionIdIsCurrentTransactionId(xid))
    5705              :     {
    5706              :         /*
    5707              :          * The tuple has already been locked by our own transaction.  This is
    5708              :          * very rare but can happen if multiple transactions are trying to
    5709              :          * lock an ancient version of the same tuple.
    5710              :          */
    5711            0 :         return TM_SelfModified;
    5712              :     }
    5713        38774 :     else if (TransactionIdIsInProgress(xid))
    5714              :     {
    5715              :         /*
    5716              :          * If the locking transaction is running, what we do depends on
    5717              :          * whether the lock modes conflict: if they do, then we must wait for
    5718              :          * it to finish; otherwise we can fall through to lock this tuple
    5719              :          * version without waiting.
    5720              :          */
    5721        36539 :         if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
    5722        36539 :                                 LOCKMODE_from_mxstatus(wantedstatus)))
    5723              :         {
    5724            8 :             *needwait = true;
    5725              :         }
    5726              : 
    5727              :         /*
    5728              :          * If we set needwait above, then this value doesn't matter;
    5729              :          * otherwise, this value signals to caller that it's okay to proceed.
    5730              :          */
    5731        36539 :         return TM_Ok;
    5732              :     }
    5733         2235 :     else if (TransactionIdDidAbort(xid))
    5734          206 :         return TM_Ok;
    5735         2029 :     else if (TransactionIdDidCommit(xid))
    5736              :     {
    5737              :         /*
    5738              :          * The other transaction committed.  If it was only a locker, then the
    5739              :          * lock is completely gone now and we can return success; but if it
    5740              :          * was an update, then what we do depends on whether the two lock
    5741              :          * modes conflict.  If they conflict, then we must report error to
    5742              :          * caller. But if they don't, we can fall through to allow the current
    5743              :          * transaction to lock the tuple.
    5744              :          *
    5745              :          * Note: the reason we worry about ISUPDATE here is because as soon as
    5746              :          * a transaction ends, all its locks are gone and meaningless, and
    5747              :          * thus we can ignore them; whereas its updates persist.  In the
    5748              :          * TransactionIdIsInProgress case, above, we don't need to check
    5749              :          * because we know the lock is still "alive" and thus a conflict needs
    5750              :          * always be checked.
    5751              :          */
    5752         2029 :         if (!ISUPDATE_from_mxstatus(status))
    5753         2020 :             return TM_Ok;
    5754              : 
    5755            9 :         if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
    5756            9 :                                 LOCKMODE_from_mxstatus(wantedstatus)))
    5757              :         {
    5758              :             /* bummer */
    5759            8 :             if (!ItemPointerEquals(&tup->t_self, &tup->t_data->t_ctid))
    5760            6 :                 return TM_Updated;
    5761              :             else
    5762            2 :                 return TM_Deleted;
    5763              :         }
    5764              : 
    5765            1 :         return TM_Ok;
    5766              :     }
    5767              : 
    5768              :     /* Not in progress, not aborted, not committed -- must have crashed */
    5769            0 :     return TM_Ok;
    5770              : }
    5771              : 
    5772              : 
    5773              : /*
    5774              :  * Recursive part of heap_lock_updated_tuple
    5775              :  *
    5776              :  * Fetch the tuple pointed to by tid in rel, and mark it as locked by the given
    5777              :  * xid with the given mode; if this tuple is updated, recurse to lock the new
    5778              :  * version as well.
    5779              :  */
    5780              : static TM_Result
    5781         2213 : heap_lock_updated_tuple_rec(Relation rel, TransactionId priorXmax,
    5782              :                             const ItemPointerData *tid, TransactionId xid,
    5783              :                             LockTupleMode mode)
    5784              : {
    5785              :     TM_Result   result;
    5786              :     ItemPointerData tupid;
    5787              :     HeapTupleData mytup;
    5788              :     Buffer      buf;
    5789              :     uint16      new_infomask,
    5790              :                 new_infomask2,
    5791              :                 old_infomask,
    5792              :                 old_infomask2;
    5793              :     TransactionId xmax,
    5794              :                 new_xmax;
    5795         2213 :     bool        cleared_all_frozen = false;
    5796              :     bool        pinned_desired_page;
    5797         2213 :     Buffer      vmbuffer = InvalidBuffer;
    5798              :     BlockNumber block;
    5799              : 
    5800         2213 :     ItemPointerCopy(tid, &tupid);
    5801              : 
    5802              :     for (;;)
    5803              :     {
    5804         2216 :         new_infomask = 0;
    5805         2216 :         new_xmax = InvalidTransactionId;
    5806         2216 :         block = ItemPointerGetBlockNumber(&tupid);
    5807         2216 :         ItemPointerCopy(&tupid, &(mytup.t_self));
    5808              : 
    5809         2216 :         if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false))
    5810              :         {
    5811              :             /*
    5812              :              * if we fail to find the updated version of the tuple, it's
    5813              :              * because it was vacuumed/pruned away after its creator
    5814              :              * transaction aborted.  So behave as if we got to the end of the
    5815              :              * chain, and there's no further tuple to lock: return success to
    5816              :              * caller.
    5817              :              */
    5818            0 :             result = TM_Ok;
    5819            0 :             goto out_unlocked;
    5820              :         }
    5821              : 
    5822         2216 : l4:
    5823         2224 :         CHECK_FOR_INTERRUPTS();
    5824              : 
    5825              :         /*
    5826              :          * Before locking the buffer, pin the visibility map page if it
    5827              :          * appears to be necessary.  Since we haven't got the lock yet,
    5828              :          * someone else might be in the middle of changing this, so we'll need
    5829              :          * to recheck after we have the lock.
    5830              :          */
    5831         2224 :         if (PageIsAllVisible(BufferGetPage(buf)))
    5832              :         {
    5833            0 :             visibilitymap_pin(rel, block, &vmbuffer);
    5834            0 :             pinned_desired_page = true;
    5835              :         }
    5836              :         else
    5837         2224 :             pinned_desired_page = false;
    5838              : 
    5839         2224 :         LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
    5840              : 
    5841              :         /*
    5842              :          * If we didn't pin the visibility map page and the page has become
    5843              :          * all visible while we were busy locking the buffer, we'll have to
    5844              :          * unlock and re-lock, to avoid holding the buffer lock across I/O.
    5845              :          * That's a bit unfortunate, but hopefully shouldn't happen often.
    5846              :          *
    5847              :          * Note: in some paths through this function, we will reach here
    5848              :          * holding a pin on a vm page that may or may not be the one matching
    5849              :          * this page.  If this page isn't all-visible, we won't use the vm
    5850              :          * page, but we hold onto such a pin till the end of the function.
    5851              :          */
    5852         2224 :         if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
    5853              :         {
    5854            0 :             LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    5855            0 :             visibilitymap_pin(rel, block, &vmbuffer);
    5856            0 :             LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
    5857              :         }
    5858              : 
    5859              :         /*
    5860              :          * Check the tuple XMIN against prior XMAX, if any.  If we reached the
    5861              :          * end of the chain, we're done, so return success.
    5862              :          */
    5863         4448 :         if (TransactionIdIsValid(priorXmax) &&
    5864         2224 :             !TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data),
    5865              :                                  priorXmax))
    5866              :         {
    5867            2 :             result = TM_Ok;
    5868            2 :             goto out_locked;
    5869              :         }
    5870              : 
    5871              :         /*
    5872              :          * Also check Xmin: if this tuple was created by an aborted
    5873              :          * (sub)transaction, then we already locked the last live one in the
    5874              :          * chain, thus we're done, so return success.
    5875              :          */
    5876         2222 :         if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
    5877              :         {
    5878           25 :             result = TM_Ok;
    5879           25 :             goto out_locked;
    5880              :         }
    5881              : 
    5882         2197 :         old_infomask = mytup.t_data->t_infomask;
    5883         2197 :         old_infomask2 = mytup.t_data->t_infomask2;
    5884         2197 :         xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
    5885              : 
    5886              :         /*
    5887              :          * If this tuple version has been updated or locked by some concurrent
    5888              :          * transaction(s), what we do depends on whether our lock mode
    5889              :          * conflicts with what those other transactions hold, and also on the
    5890              :          * status of them.
    5891              :          */
    5892         2197 :         if (!(old_infomask & HEAP_XMAX_INVALID))
    5893              :         {
    5894              :             TransactionId rawxmax;
    5895              :             bool        needwait;
    5896              : 
    5897         2138 :             rawxmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
    5898         2138 :             if (old_infomask & HEAP_XMAX_IS_MULTI)
    5899              :             {
    5900              :                 int         nmembers;
    5901              :                 int         i;
    5902              :                 MultiXactMember *members;
    5903              : 
    5904              :                 /*
    5905              :                  * We don't need a test for pg_upgrade'd tuples: this is only
    5906              :                  * applied to tuples after the first in an update chain.  Said
    5907              :                  * first tuple in the chain may well be locked-in-9.2-and-
    5908              :                  * pg_upgraded, but that one was already locked by our caller,
    5909              :                  * not us; and any subsequent ones cannot be because our
    5910              :                  * caller must necessarily have obtained a snapshot later than
    5911              :                  * the pg_upgrade itself.
    5912              :                  */
    5913              :                 Assert(!HEAP_LOCKED_UPGRADED(mytup.t_data->t_infomask));
    5914              : 
    5915         2109 :                 nmembers = GetMultiXactIdMembers(rawxmax, &members, false,
    5916         2109 :                                                  HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
    5917        40854 :                 for (i = 0; i < nmembers; i++)
    5918              :                 {
    5919        38745 :                     result = test_lockmode_for_conflict(members[i].status,
    5920        38745 :                                                         members[i].xid,
    5921              :                                                         mode,
    5922              :                                                         &mytup,
    5923              :                                                         &needwait);
    5924              : 
    5925              :                     /*
    5926              :                      * If the tuple was already locked by ourselves in a
    5927              :                      * previous iteration of this (say heap_lock_tuple was
    5928              :                      * forced to restart the locking loop because of a change
    5929              :                      * in xmax), then we hold the lock already on this tuple
    5930              :                      * version and we don't need to do anything; and this is
    5931              :                      * not an error condition either.  We just need to skip
    5932              :                      * this tuple and continue locking the next version in the
    5933              :                      * update chain.
    5934              :                      */
    5935        38745 :                     if (result == TM_SelfModified)
    5936              :                     {
    5937            0 :                         pfree(members);
    5938            0 :                         goto next;
    5939              :                     }
    5940              : 
    5941        38745 :                     if (needwait)
    5942              :                     {
    5943            0 :                         LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    5944            0 :                         XactLockTableWait(members[i].xid, rel,
    5945              :                                           &mytup.t_self,
    5946              :                                           XLTW_LockUpdated);
    5947            0 :                         pfree(members);
    5948            0 :                         goto l4;
    5949              :                     }
    5950        38745 :                     if (result != TM_Ok)
    5951              :                     {
    5952            0 :                         pfree(members);
    5953            0 :                         goto out_locked;
    5954              :                     }
    5955              :                 }
    5956         2109 :                 if (members)
    5957         2109 :                     pfree(members);
    5958              :             }
    5959              :             else
    5960              :             {
    5961              :                 MultiXactStatus status;
    5962              : 
    5963              :                 /*
    5964              :                  * For a non-multi Xmax, we first need to compute the
    5965              :                  * corresponding MultiXactStatus by using the infomask bits.
    5966              :                  */
    5967           29 :                 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
    5968              :                 {
    5969           10 :                     if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
    5970           10 :                         status = MultiXactStatusForKeyShare;
    5971            0 :                     else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
    5972            0 :                         status = MultiXactStatusForShare;
    5973            0 :                     else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
    5974              :                     {
    5975            0 :                         if (old_infomask2 & HEAP_KEYS_UPDATED)
    5976            0 :                             status = MultiXactStatusForUpdate;
    5977              :                         else
    5978            0 :                             status = MultiXactStatusForNoKeyUpdate;
    5979              :                     }
    5980              :                     else
    5981              :                     {
    5982              :                         /*
    5983              :                          * LOCK_ONLY present alone (a pg_upgraded tuple marked
    5984              :                          * as share-locked in the old cluster) shouldn't be
    5985              :                          * seen in the middle of an update chain.
    5986              :                          */
    5987            0 :                         elog(ERROR, "invalid lock status in tuple");
    5988              :                     }
    5989              :                 }
    5990              :                 else
    5991              :                 {
    5992              :                     /* it's an update, but which kind? */
    5993           19 :                     if (old_infomask2 & HEAP_KEYS_UPDATED)
    5994           14 :                         status = MultiXactStatusUpdate;
    5995              :                     else
    5996            5 :                         status = MultiXactStatusNoKeyUpdate;
    5997              :                 }
    5998              : 
    5999           29 :                 result = test_lockmode_for_conflict(status, rawxmax, mode,
    6000              :                                                     &mytup, &needwait);
    6001              : 
    6002              :                 /*
    6003              :                  * If the tuple was already locked by ourselves in a previous
    6004              :                  * iteration of this (say heap_lock_tuple was forced to
    6005              :                  * restart the locking loop because of a change in xmax), then
    6006              :                  * we hold the lock already on this tuple version and we don't
    6007              :                  * need to do anything; and this is not an error condition
    6008              :                  * either.  We just need to skip this tuple and continue
    6009              :                  * locking the next version in the update chain.
    6010              :                  */
    6011           29 :                 if (result == TM_SelfModified)
    6012            0 :                     goto next;
    6013              : 
    6014           29 :                 if (needwait)
    6015              :                 {
    6016            8 :                     LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    6017            8 :                     XactLockTableWait(rawxmax, rel, &mytup.t_self,
    6018              :                                       XLTW_LockUpdated);
    6019            8 :                     goto l4;
    6020              :                 }
    6021           21 :                 if (result != TM_Ok)
    6022              :                 {
    6023            8 :                     goto out_locked;
    6024              :                 }
    6025              :             }
    6026              :         }
    6027              : 
    6028              :         /* compute the new Xmax and infomask values for the tuple ... */
    6029         2181 :         compute_new_xmax_infomask(xmax, old_infomask, mytup.t_data->t_infomask2,
    6030              :                                   xid, mode, false,
    6031              :                                   &new_xmax, &new_infomask, &new_infomask2);
    6032              : 
    6033         2181 :         if (PageIsAllVisible(BufferGetPage(buf)) &&
    6034            0 :             visibilitymap_clear(rel, block, vmbuffer,
    6035              :                                 VISIBILITYMAP_ALL_FROZEN))
    6036            0 :             cleared_all_frozen = true;
    6037              : 
    6038         2181 :         START_CRIT_SECTION();
    6039              : 
    6040              :         /* ... and set them */
    6041         2181 :         HeapTupleHeaderSetXmax(mytup.t_data, new_xmax);
    6042         2181 :         mytup.t_data->t_infomask &= ~HEAP_XMAX_BITS;
    6043         2181 :         mytup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    6044         2181 :         mytup.t_data->t_infomask |= new_infomask;
    6045         2181 :         mytup.t_data->t_infomask2 |= new_infomask2;
    6046              : 
    6047         2181 :         MarkBufferDirty(buf);
    6048              : 
    6049              :         /* XLOG stuff */
    6050         2181 :         if (RelationNeedsWAL(rel))
    6051              :         {
    6052              :             xl_heap_lock_updated xlrec;
    6053              :             XLogRecPtr  recptr;
    6054         2181 :             Page        page = BufferGetPage(buf);
    6055              : 
    6056         2181 :             XLogBeginInsert();
    6057         2181 :             XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
    6058              : 
    6059         2181 :             xlrec.offnum = ItemPointerGetOffsetNumber(&mytup.t_self);
    6060         2181 :             xlrec.xmax = new_xmax;
    6061         2181 :             xlrec.infobits_set = compute_infobits(new_infomask, new_infomask2);
    6062         2181 :             xlrec.flags =
    6063         2181 :                 cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
    6064              : 
    6065         2181 :             XLogRegisterData(&xlrec, SizeOfHeapLockUpdated);
    6066              : 
    6067         2181 :             recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOCK_UPDATED);
    6068              : 
    6069         2181 :             PageSetLSN(page, recptr);
    6070              :         }
    6071              : 
    6072         2181 :         END_CRIT_SECTION();
    6073              : 
    6074         2181 : next:
    6075              :         /* if we find the end of update chain, we're done. */
    6076         4362 :         if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
    6077         4362 :             HeapTupleHeaderIndicatesMovedPartitions(mytup.t_data) ||
    6078         2185 :             ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
    6079            4 :             HeapTupleHeaderIsOnlyLocked(mytup.t_data))
    6080              :         {
    6081         2178 :             result = TM_Ok;
    6082         2178 :             goto out_locked;
    6083              :         }
    6084              : 
    6085              :         /* tail recursion */
    6086            3 :         priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
    6087            3 :         ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
    6088            3 :         UnlockReleaseBuffer(buf);
    6089              :     }
    6090              : 
    6091              :     result = TM_Ok;
    6092              : 
    6093         2213 : out_locked:
    6094         2213 :     UnlockReleaseBuffer(buf);
    6095              : 
    6096         2213 : out_unlocked:
    6097         2213 :     if (vmbuffer != InvalidBuffer)
    6098            0 :         ReleaseBuffer(vmbuffer);
    6099              : 
    6100         2213 :     return result;
    6101              : }
    6102              : 
    6103              : /*
    6104              :  * heap_lock_updated_tuple
    6105              :  *      Follow update chain when locking an updated tuple, acquiring locks (row
    6106              :  *      marks) on the updated versions.
    6107              :  *
    6108              :  * 'prior_infomask', 'prior_raw_xmax' and 'prior_ctid' are the corresponding
    6109              :  * fields from the initial tuple.  We will lock the tuples starting from the
    6110              :  * one that 'prior_ctid' points to.  Note: This function does not lock the
    6111              :  * initial tuple itself.
    6112              :  *
    6113              :  * This function doesn't check visibility, it just unconditionally marks the
    6114              :  * tuple(s) as locked.  If any tuple in the updated chain is being deleted
    6115              :  * concurrently (or updated with the key being modified), sleep until the
    6116              :  * transaction doing it is finished.
    6117              :  *
    6118              :  * Note that we don't acquire heavyweight tuple locks on the tuples we walk
    6119              :  * when we have to wait for other transactions to release them, as opposed to
    6120              :  * what heap_lock_tuple does.  The reason is that having more than one
    6121              :  * transaction walking the chain is probably uncommon enough that risk of
    6122              :  * starvation is not likely: one of the preconditions for being here is that
    6123              :  * the snapshot in use predates the update that created this tuple (because we
    6124              :  * started at an earlier version of the tuple), but at the same time such a
    6125              :  * transaction cannot be using repeatable read or serializable isolation
    6126              :  * levels, because that would lead to a serializability failure.
    6127              :  */
    6128              : static TM_Result
    6129         2215 : heap_lock_updated_tuple(Relation rel,
    6130              :                         uint16 prior_infomask,
    6131              :                         TransactionId prior_raw_xmax,
    6132              :                         const ItemPointerData *prior_ctid,
    6133              :                         TransactionId xid, LockTupleMode mode)
    6134              : {
    6135         2215 :     INJECTION_POINT("heap_lock_updated_tuple", NULL);
    6136              : 
    6137              :     /*
    6138              :      * If the tuple has moved into another partition (effectively a delete)
    6139              :      * stop here.
    6140              :      */
    6141         2215 :     if (!ItemPointerIndicatesMovedPartitions(prior_ctid))
    6142              :     {
    6143              :         TransactionId prior_xmax;
    6144              : 
    6145              :         /*
    6146              :          * If this is the first possibly-multixact-able operation in the
    6147              :          * current transaction, set my per-backend OldestMemberMXactId
    6148              :          * setting. We can be certain that the transaction will never become a
    6149              :          * member of any older MultiXactIds than that.  (We have to do this
    6150              :          * even if we end up just using our own TransactionId below, since
    6151              :          * some other backend could incorporate our XID into a MultiXact
    6152              :          * immediately afterwards.)
    6153              :          */
    6154         2213 :         MultiXactIdSetOldestMember();
    6155              : 
    6156         4426 :         prior_xmax = (prior_infomask & HEAP_XMAX_IS_MULTI) ?
    6157         2213 :             MultiXactIdGetUpdateXid(prior_raw_xmax, prior_infomask) : prior_raw_xmax;
    6158         2213 :         return heap_lock_updated_tuple_rec(rel, prior_xmax, prior_ctid, xid, mode);
    6159              :     }
    6160              : 
    6161              :     /* nothing to lock */
    6162            2 :     return TM_Ok;
    6163              : }
    6164              : 
    6165              : /*
    6166              :  *  heap_finish_speculative - mark speculative insertion as successful
    6167              :  *
    6168              :  * To successfully finish a speculative insertion we have to clear speculative
    6169              :  * token from tuple.  To do so the t_ctid field, which will contain a
    6170              :  * speculative token value, is modified in place to point to the tuple itself,
    6171              :  * which is characteristic of a newly inserted ordinary tuple.
    6172              :  *
    6173              :  * NB: It is not ok to commit without either finishing or aborting a
    6174              :  * speculative insertion.  We could treat speculative tuples of committed
    6175              :  * transactions implicitly as completed, but then we would have to be prepared
    6176              :  * to deal with speculative tokens on committed tuples.  That wouldn't be
    6177              :  * difficult - no-one looks at the ctid field of a tuple with invalid xmax -
    6178              :  * but clearing the token at completion isn't very expensive either.
    6179              :  * An explicit confirmation WAL record also makes logical decoding simpler.
    6180              :  */
    6181              : void
    6182         2215 : heap_finish_speculative(Relation relation, const ItemPointerData *tid)
    6183              : {
    6184              :     Buffer      buffer;
    6185              :     Page        page;
    6186              :     OffsetNumber offnum;
    6187              :     ItemId      lp;
    6188              :     HeapTupleHeader htup;
    6189              : 
    6190         2215 :     buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
    6191         2215 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    6192         2215 :     page = BufferGetPage(buffer);
    6193              : 
    6194         2215 :     offnum = ItemPointerGetOffsetNumber(tid);
    6195         2215 :     if (offnum < 1 || offnum > PageGetMaxOffsetNumber(page))
    6196            0 :         elog(ERROR, "offnum out of range");
    6197         2215 :     lp = PageGetItemId(page, offnum);
    6198         2215 :     if (!ItemIdIsNormal(lp))
    6199            0 :         elog(ERROR, "invalid lp");
    6200              : 
    6201         2215 :     htup = (HeapTupleHeader) PageGetItem(page, lp);
    6202              : 
    6203              :     /* NO EREPORT(ERROR) from here till changes are logged */
    6204         2215 :     START_CRIT_SECTION();
    6205              : 
    6206              :     Assert(HeapTupleHeaderIsSpeculative(htup));
    6207              : 
    6208         2215 :     MarkBufferDirty(buffer);
    6209              : 
    6210              :     /*
    6211              :      * Replace the speculative insertion token with a real t_ctid, pointing to
    6212              :      * itself like it does on regular tuples.
    6213              :      */
    6214         2215 :     htup->t_ctid = *tid;
    6215              : 
    6216              :     /* XLOG stuff */
    6217         2215 :     if (RelationNeedsWAL(relation))
    6218              :     {
    6219              :         xl_heap_confirm xlrec;
    6220              :         XLogRecPtr  recptr;
    6221              : 
    6222         2195 :         xlrec.offnum = ItemPointerGetOffsetNumber(tid);
    6223              : 
    6224         2195 :         XLogBeginInsert();
    6225              : 
    6226              :         /* We want the same filtering on this as on a plain insert */
    6227         2195 :         XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    6228              : 
    6229         2195 :         XLogRegisterData(&xlrec, SizeOfHeapConfirm);
    6230         2195 :         XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    6231              : 
    6232         2195 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
    6233              : 
    6234         2195 :         PageSetLSN(page, recptr);
    6235              :     }
    6236              : 
    6237         2215 :     END_CRIT_SECTION();
    6238              : 
    6239         2215 :     UnlockReleaseBuffer(buffer);
    6240         2215 : }
    6241              : 
    6242              : /*
    6243              :  *  heap_abort_speculative - kill a speculatively inserted tuple
    6244              :  *
    6245              :  * Marks a tuple that was speculatively inserted in the same command as dead,
    6246              :  * by setting its xmin as invalid.  That makes it immediately appear as dead
    6247              :  * to all transactions, including our own.  In particular, it makes
    6248              :  * HeapTupleSatisfiesDirty() regard the tuple as dead, so that another backend
    6249              :  * inserting a duplicate key value won't unnecessarily wait for our whole
    6250              :  * transaction to finish (it'll just wait for our speculative insertion to
    6251              :  * finish).
    6252              :  *
    6253              :  * Killing the tuple prevents "unprincipled deadlocks", which are deadlocks
    6254              :  * that arise due to a mutual dependency that is not user visible.  By
    6255              :  * definition, unprincipled deadlocks cannot be prevented by the user
    6256              :  * reordering lock acquisition in client code, because the implementation level
    6257              :  * lock acquisitions are not under the user's direct control.  If speculative
    6258              :  * inserters did not take this precaution, then under high concurrency they
    6259              :  * could deadlock with each other, which would not be acceptable.
    6260              :  *
    6261              :  * This is somewhat redundant with heap_delete, but we prefer to have a
    6262              :  * dedicated routine with stripped down requirements.  Note that this is also
    6263              :  * used to delete the TOAST tuples created during speculative insertion.
    6264              :  *
    6265              :  * This routine does not affect logical decoding as it only looks at
    6266              :  * confirmation records.
    6267              :  */
    6268              : void
    6269           16 : heap_abort_speculative(Relation relation, const ItemPointerData *tid)
    6270              : {
    6271           16 :     TransactionId xid = GetCurrentTransactionId();
    6272              :     ItemId      lp;
    6273              :     HeapTupleData tp;
    6274              :     Page        page;
    6275              :     BlockNumber block;
    6276              :     Buffer      buffer;
    6277              : 
    6278              :     Assert(ItemPointerIsValid(tid));
    6279              : 
    6280           16 :     block = ItemPointerGetBlockNumber(tid);
    6281           16 :     buffer = ReadBuffer(relation, block);
    6282           16 :     page = BufferGetPage(buffer);
    6283              : 
    6284           16 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    6285              : 
    6286              :     /*
    6287              :      * Page can't be all visible, we just inserted into it, and are still
    6288              :      * running.
    6289              :      */
    6290              :     Assert(!PageIsAllVisible(page));
    6291              : 
    6292           16 :     lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
    6293              :     Assert(ItemIdIsNormal(lp));
    6294              : 
    6295           16 :     tp.t_tableOid = RelationGetRelid(relation);
    6296           16 :     tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
    6297           16 :     tp.t_len = ItemIdGetLength(lp);
    6298           16 :     tp.t_self = *tid;
    6299              : 
    6300              :     /*
    6301              :      * Sanity check that the tuple really is a speculatively inserted tuple,
    6302              :      * inserted by us.
    6303              :      */
    6304           16 :     if (tp.t_data->t_choice.t_heap.t_xmin != xid)
    6305            0 :         elog(ERROR, "attempted to kill a tuple inserted by another transaction");
    6306           16 :     if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
    6307            0 :         elog(ERROR, "attempted to kill a non-speculative tuple");
    6308              :     Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
    6309              : 
    6310              :     /*
    6311              :      * No need to check for serializable conflicts here.  There is never a
    6312              :      * need for a combo CID, either.  No need to extract replica identity, or
    6313              :      * do anything special with infomask bits.
    6314              :      */
    6315              : 
    6316           16 :     START_CRIT_SECTION();
    6317              : 
    6318              :     /*
    6319              :      * The tuple will become DEAD immediately.  Flag that this page is a
    6320              :      * candidate for pruning by setting xmin to TransactionXmin. While not
    6321              :      * immediately prunable, it is the oldest xid we can cheaply determine
    6322              :      * that's safe against wraparound / being older than the table's
    6323              :      * relfrozenxid.  To defend against the unlikely case of a new relation
    6324              :      * having a newer relfrozenxid than our TransactionXmin, use relfrozenxid
    6325              :      * if so (vacuum can't subsequently move relfrozenxid to beyond
    6326              :      * TransactionXmin, so there's no race here).
    6327              :      */
    6328              :     Assert(TransactionIdIsValid(TransactionXmin));
    6329              :     {
    6330           16 :         TransactionId relfrozenxid = relation->rd_rel->relfrozenxid;
    6331              :         TransactionId prune_xid;
    6332              : 
    6333           16 :         if (TransactionIdPrecedes(TransactionXmin, relfrozenxid))
    6334            0 :             prune_xid = relfrozenxid;
    6335              :         else
    6336           16 :             prune_xid = TransactionXmin;
    6337           16 :         PageSetPrunable(page, prune_xid);
    6338              :     }
    6339              : 
    6340              :     /* store transaction information of xact deleting the tuple */
    6341           16 :     tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    6342           16 :     tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    6343              : 
    6344              :     /*
    6345              :      * Set the tuple header xmin to InvalidTransactionId.  This makes the
    6346              :      * tuple immediately invisible everyone.  (In particular, to any
    6347              :      * transactions waiting on the speculative token, woken up later.)
    6348              :      */
    6349           16 :     HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
    6350              : 
    6351              :     /* Clear the speculative insertion token too */
    6352           16 :     tp.t_data->t_ctid = tp.t_self;
    6353              : 
    6354           16 :     MarkBufferDirty(buffer);
    6355              : 
    6356              :     /*
    6357              :      * XLOG stuff
    6358              :      *
    6359              :      * The WAL records generated here match heap_delete().  The same recovery
    6360              :      * routines are used.
    6361              :      */
    6362           16 :     if (RelationNeedsWAL(relation))
    6363              :     {
    6364              :         xl_heap_delete xlrec;
    6365              :         XLogRecPtr  recptr;
    6366              : 
    6367           12 :         xlrec.flags = XLH_DELETE_IS_SUPER;
    6368           24 :         xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
    6369           12 :                                               tp.t_data->t_infomask2);
    6370           12 :         xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
    6371           12 :         xlrec.xmax = xid;
    6372              : 
    6373           12 :         XLogBeginInsert();
    6374           12 :         XLogRegisterData(&xlrec, SizeOfHeapDelete);
    6375           12 :         XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    6376              : 
    6377              :         /* No replica identity & replication origin logged */
    6378              : 
    6379           12 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
    6380              : 
    6381           12 :         PageSetLSN(page, recptr);
    6382              :     }
    6383              : 
    6384           16 :     END_CRIT_SECTION();
    6385              : 
    6386           16 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    6387              : 
    6388           16 :     if (HeapTupleHasExternal(&tp))
    6389              :     {
    6390              :         Assert(!IsToastRelation(relation));
    6391            1 :         heap_toast_delete(relation, &tp, true);
    6392              :     }
    6393              : 
    6394              :     /*
    6395              :      * Never need to mark tuple for invalidation, since catalogs don't support
    6396              :      * speculative insertion
    6397              :      */
    6398              : 
    6399              :     /* Now we can release the buffer */
    6400           16 :     ReleaseBuffer(buffer);
    6401              : 
    6402              :     /* count deletion, as we counted the insertion too */
    6403           16 :     pgstat_count_heap_delete(relation);
    6404           16 : }
    6405              : 
    6406              : /*
    6407              :  * heap_inplace_lock - protect inplace update from concurrent heap_update()
    6408              :  *
    6409              :  * Evaluate whether the tuple's state is compatible with a no-key update.
    6410              :  * Current transaction rowmarks are fine, as is KEY SHARE from any
    6411              :  * transaction.  If compatible, return true with the buffer exclusive-locked,
    6412              :  * and the caller must release that by calling
    6413              :  * heap_inplace_update_and_unlock(), calling heap_inplace_unlock(), or raising
    6414              :  * an error.  Otherwise, call release_callback(arg), wait for blocking
    6415              :  * transactions to end, and return false.
    6416              :  *
    6417              :  * Since this is intended for system catalogs and SERIALIZABLE doesn't cover
    6418              :  * DDL, this doesn't guarantee any particular predicate locking.
    6419              :  *
    6420              :  * heap_delete() is a rarer source of blocking transactions (xwait).  We'll
    6421              :  * wait for such a transaction just like for the normal heap_update() case.
    6422              :  * Normal concurrent DROP commands won't cause that, because all inplace
    6423              :  * updaters take some lock that conflicts with DROP.  An explicit SQL "DELETE
    6424              :  * FROM pg_class" can cause it.  By waiting, if the concurrent transaction
    6425              :  * executed both "DELETE FROM pg_class" and "INSERT INTO pg_class", our caller
    6426              :  * can find the successor tuple.
    6427              :  *
    6428              :  * Readers of inplace-updated fields expect changes to those fields are
    6429              :  * durable.  For example, vac_truncate_clog() reads datfrozenxid from
    6430              :  * pg_database tuples via catalog snapshots.  A future snapshot must not
    6431              :  * return a lower datfrozenxid for the same database OID (lower in the
    6432              :  * FullTransactionIdPrecedes() sense).  We achieve that since no update of a
    6433              :  * tuple can start while we hold a lock on its buffer.  In cases like
    6434              :  * BEGIN;GRANT;CREATE INDEX;COMMIT we're inplace-updating a tuple visible only
    6435              :  * to this transaction.  ROLLBACK then is one case where it's okay to lose
    6436              :  * inplace updates.  (Restoring relhasindex=false on ROLLBACK is fine, since
    6437              :  * any concurrent CREATE INDEX would have blocked, then inplace-updated the
    6438              :  * committed tuple.)
    6439              :  *
    6440              :  * In principle, we could avoid waiting by overwriting every tuple in the
    6441              :  * updated tuple chain.  Reader expectations permit updating a tuple only if
    6442              :  * it's aborted, is the tail of the chain, or we already updated the tuple
    6443              :  * referenced in its t_ctid.  Hence, we would need to overwrite the tuples in
    6444              :  * order from tail to head.  That would imply either (a) mutating all tuples
    6445              :  * in one critical section or (b) accepting a chance of partial completion.
    6446              :  * Partial completion of a relfrozenxid update would have the weird
    6447              :  * consequence that the table's next VACUUM could see the table's relfrozenxid
    6448              :  * move forward between vacuum_get_cutoffs() and finishing.
    6449              :  */
    6450              : bool
    6451       214226 : heap_inplace_lock(Relation relation,
    6452              :                   HeapTuple oldtup_ptr, Buffer buffer,
    6453              :                   void (*release_callback) (void *), void *arg)
    6454              : {
    6455       214226 :     HeapTupleData oldtup = *oldtup_ptr; /* minimize diff vs. heap_update() */
    6456              :     TM_Result   result;
    6457              :     bool        ret;
    6458              : 
    6459              : #ifdef USE_ASSERT_CHECKING
    6460              :     if (RelationGetRelid(relation) == RelationRelationId)
    6461              :         check_inplace_rel_lock(oldtup_ptr);
    6462              : #endif
    6463              : 
    6464              :     Assert(BufferIsValid(buffer));
    6465              : 
    6466              :     /*
    6467              :      * Register shared cache invals if necessary.  Other sessions may finish
    6468              :      * inplace updates of this tuple between this step and LockTuple().  Since
    6469              :      * inplace updates don't change cache keys, that's harmless.
    6470              :      *
    6471              :      * While it's tempting to register invals only after confirming we can
    6472              :      * return true, the following obstacle precludes reordering steps that
    6473              :      * way.  Registering invals might reach a CatalogCacheInitializeCache()
    6474              :      * that locks "buffer".  That would hang indefinitely if running after our
    6475              :      * own LockBuffer().  Hence, we must register invals before LockBuffer().
    6476              :      */
    6477       214226 :     CacheInvalidateHeapTupleInplace(relation, oldtup_ptr);
    6478              : 
    6479       214226 :     LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
    6480       214226 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    6481              : 
    6482              :     /*----------
    6483              :      * Interpret HeapTupleSatisfiesUpdate() like heap_update() does, except:
    6484              :      *
    6485              :      * - wait unconditionally
    6486              :      * - already locked tuple above, since inplace needs that unconditionally
    6487              :      * - don't recheck header after wait: simpler to defer to next iteration
    6488              :      * - don't try to continue even if the updater aborts: likewise
    6489              :      * - no crosscheck
    6490              :      */
    6491       214226 :     result = HeapTupleSatisfiesUpdate(&oldtup, GetCurrentCommandId(false),
    6492              :                                       buffer);
    6493              : 
    6494       214226 :     if (result == TM_Invisible)
    6495              :     {
    6496              :         /* no known way this can happen */
    6497            0 :         ereport(ERROR,
    6498              :                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
    6499              :                  errmsg_internal("attempted to overwrite invisible tuple")));
    6500              :     }
    6501       214226 :     else if (result == TM_SelfModified)
    6502              :     {
    6503              :         /*
    6504              :          * CREATE INDEX might reach this if an expression is silly enough to
    6505              :          * call e.g. SELECT ... FROM pg_class FOR SHARE.  C code of other SQL
    6506              :          * statements might get here after a heap_update() of the same row, in
    6507              :          * the absence of an intervening CommandCounterIncrement().
    6508              :          */
    6509            0 :         ereport(ERROR,
    6510              :                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
    6511              :                  errmsg("tuple to be updated was already modified by an operation triggered by the current command")));
    6512              :     }
    6513       214226 :     else if (result == TM_BeingModified)
    6514              :     {
    6515              :         TransactionId xwait;
    6516              :         uint16      infomask;
    6517              : 
    6518           30 :         xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
    6519           30 :         infomask = oldtup.t_data->t_infomask;
    6520              : 
    6521           30 :         if (infomask & HEAP_XMAX_IS_MULTI)
    6522              :         {
    6523            5 :             LockTupleMode lockmode = LockTupleNoKeyExclusive;
    6524            5 :             MultiXactStatus mxact_status = MultiXactStatusNoKeyUpdate;
    6525              :             int         remain;
    6526              : 
    6527            5 :             if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
    6528              :                                         lockmode, NULL))
    6529              :             {
    6530            2 :                 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    6531            2 :                 release_callback(arg);
    6532            2 :                 ret = false;
    6533            2 :                 MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
    6534              :                                 relation, &oldtup.t_self, XLTW_Update,
    6535              :                                 &remain);
    6536              :             }
    6537              :             else
    6538            3 :                 ret = true;
    6539              :         }
    6540           25 :         else if (TransactionIdIsCurrentTransactionId(xwait))
    6541            1 :             ret = true;
    6542           24 :         else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
    6543            1 :             ret = true;
    6544              :         else
    6545              :         {
    6546           23 :             LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    6547           23 :             release_callback(arg);
    6548           23 :             ret = false;
    6549           23 :             XactLockTableWait(xwait, relation, &oldtup.t_self,
    6550              :                               XLTW_Update);
    6551              :         }
    6552              :     }
    6553              :     else
    6554              :     {
    6555       214196 :         ret = (result == TM_Ok);
    6556       214196 :         if (!ret)
    6557              :         {
    6558            0 :             LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    6559            0 :             release_callback(arg);
    6560              :         }
    6561              :     }
    6562              : 
    6563              :     /*
    6564              :      * GetCatalogSnapshot() relies on invalidation messages to know when to
    6565              :      * take a new snapshot.  COMMIT of xwait is responsible for sending the
    6566              :      * invalidation.  We're not acquiring heavyweight locks sufficient to
    6567              :      * block if not yet sent, so we must take a new snapshot to ensure a later
    6568              :      * attempt has a fair chance.  While we don't need this if xwait aborted,
    6569              :      * don't bother optimizing that.
    6570              :      */
    6571       214226 :     if (!ret)
    6572              :     {
    6573           25 :         UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
    6574           25 :         ForgetInplace_Inval();
    6575           25 :         InvalidateCatalogSnapshot();
    6576              :     }
    6577       214226 :     return ret;
    6578              : }
    6579              : 
    6580              : /*
    6581              :  * heap_inplace_update_and_unlock - core of systable_inplace_update_finish
    6582              :  *
    6583              :  * The tuple cannot change size, and therefore its header fields and null
    6584              :  * bitmap (if any) don't change either.
    6585              :  *
    6586              :  * Since we hold LOCKTAG_TUPLE, no updater has a local copy of this tuple.
    6587              :  */
    6588              : void
    6589        95748 : heap_inplace_update_and_unlock(Relation relation,
    6590              :                                HeapTuple oldtup, HeapTuple tuple,
    6591              :                                Buffer buffer)
    6592              : {
    6593        95748 :     HeapTupleHeader htup = oldtup->t_data;
    6594              :     uint32      oldlen;
    6595              :     uint32      newlen;
    6596              :     char       *dst;
    6597              :     char       *src;
    6598        95748 :     int         nmsgs = 0;
    6599        95748 :     SharedInvalidationMessage *invalMessages = NULL;
    6600        95748 :     bool        RelcacheInitFileInval = false;
    6601              : 
    6602              :     Assert(ItemPointerEquals(&oldtup->t_self, &tuple->t_self));
    6603        95748 :     oldlen = oldtup->t_len - htup->t_hoff;
    6604        95748 :     newlen = tuple->t_len - tuple->t_data->t_hoff;
    6605        95748 :     if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
    6606            0 :         elog(ERROR, "wrong tuple length");
    6607              : 
    6608        95748 :     dst = (char *) htup + htup->t_hoff;
    6609        95748 :     src = (char *) tuple->t_data + tuple->t_data->t_hoff;
    6610              : 
    6611              :     /* Like RecordTransactionCommit(), log only if needed */
    6612        95748 :     if (XLogStandbyInfoActive())
    6613        60727 :         nmsgs = inplaceGetInvalidationMessages(&invalMessages,
    6614              :                                                &RelcacheInitFileInval);
    6615              : 
    6616              :     /*
    6617              :      * Unlink relcache init files as needed.  If unlinking, acquire
    6618              :      * RelCacheInitLock until after associated invalidations.  By doing this
    6619              :      * in advance, if we checkpoint and then crash between inplace
    6620              :      * XLogInsert() and inval, we don't rely on StartupXLOG() ->
    6621              :      * RelationCacheInitFileRemove().  That uses elevel==LOG, so replay would
    6622              :      * neglect to PANIC on EIO.
    6623              :      */
    6624        95748 :     PreInplace_Inval();
    6625              : 
    6626              :     /*----------
    6627              :      * NO EREPORT(ERROR) from here till changes are complete
    6628              :      *
    6629              :      * Our exclusive buffer lock won't stop a reader having already pinned and
    6630              :      * checked visibility for this tuple. With the usual order of changes
    6631              :      * (i.e. updating the buffer contents before WAL logging), a reader could
    6632              :      * observe our not-yet-persistent update to relfrozenxid and update
    6633              :      * datfrozenxid based on that. A crash in that moment could allow
    6634              :      * datfrozenxid to overtake relfrozenxid:
    6635              :      *
    6636              :      * ["D" is a VACUUM (ONLY_DATABASE_STATS)]
    6637              :      * ["R" is a VACUUM tbl]
    6638              :      * D: vac_update_datfrozenxid() -> systable_beginscan(pg_class)
    6639              :      * D: systable_getnext() returns pg_class tuple of tbl
    6640              :      * R: memcpy() into pg_class tuple of tbl
    6641              :      * D: raise pg_database.datfrozenxid, XLogInsert(), finish
    6642              :      * [crash]
    6643              :      * [recovery restores datfrozenxid w/o relfrozenxid]
    6644              :      *
    6645              :      * We avoid that by using a temporary copy of the buffer to hide our
    6646              :      * change from other backends until the change has been WAL-logged. We
    6647              :      * apply our change to the temporary copy and WAL-log it, before modifying
    6648              :      * the real page. That way any action a reader of the in-place-updated
    6649              :      * value takes will be WAL logged after this change.
    6650              :      */
    6651        95748 :     START_CRIT_SECTION();
    6652              : 
    6653        95748 :     MarkBufferDirty(buffer);
    6654              : 
    6655              :     /* XLOG stuff */
    6656        95748 :     if (RelationNeedsWAL(relation))
    6657              :     {
    6658              :         xl_heap_inplace xlrec;
    6659              :         PGAlignedBlock copied_buffer;
    6660        95740 :         char       *origdata = (char *) BufferGetBlock(buffer);
    6661        95740 :         Page        page = BufferGetPage(buffer);
    6662        95740 :         uint16      lower = ((PageHeader) page)->pd_lower;
    6663        95740 :         uint16      upper = ((PageHeader) page)->pd_upper;
    6664              :         uintptr_t   dst_offset_in_block;
    6665              :         RelFileLocator rlocator;
    6666              :         ForkNumber  forkno;
    6667              :         BlockNumber blkno;
    6668              :         XLogRecPtr  recptr;
    6669              : 
    6670        95740 :         xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
    6671        95740 :         xlrec.dbId = MyDatabaseId;
    6672        95740 :         xlrec.tsId = MyDatabaseTableSpace;
    6673        95740 :         xlrec.relcacheInitFileInval = RelcacheInitFileInval;
    6674        95740 :         xlrec.nmsgs = nmsgs;
    6675              : 
    6676        95740 :         XLogBeginInsert();
    6677        95740 :         XLogRegisterData(&xlrec, MinSizeOfHeapInplace);
    6678        95740 :         if (nmsgs != 0)
    6679        44662 :             XLogRegisterData(invalMessages,
    6680              :                              nmsgs * sizeof(SharedInvalidationMessage));
    6681              : 
    6682              :         /* register block matching what buffer will look like after changes */
    6683        95740 :         memcpy(copied_buffer.data, origdata, lower);
    6684        95740 :         memcpy(copied_buffer.data + upper, origdata + upper, BLCKSZ - upper);
    6685        95740 :         dst_offset_in_block = dst - origdata;
    6686        95740 :         memcpy(copied_buffer.data + dst_offset_in_block, src, newlen);
    6687        95740 :         BufferGetTag(buffer, &rlocator, &forkno, &blkno);
    6688              :         Assert(forkno == MAIN_FORKNUM);
    6689        95740 :         XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data,
    6690              :                           REGBUF_STANDARD);
    6691        95740 :         XLogRegisterBufData(0, src, newlen);
    6692              : 
    6693              :         /* inplace updates aren't decoded atm, don't log the origin */
    6694              : 
    6695        95740 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
    6696              : 
    6697        95740 :         PageSetLSN(page, recptr);
    6698              :     }
    6699              : 
    6700        95748 :     memcpy(dst, src, newlen);
    6701              : 
    6702        95748 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    6703              : 
    6704              :     /*
    6705              :      * Send invalidations to shared queue.  SearchSysCacheLocked1() assumes we
    6706              :      * do this before UnlockTuple().
    6707              :      */
    6708        95748 :     AtInplace_Inval();
    6709              : 
    6710        95748 :     END_CRIT_SECTION();
    6711        95748 :     UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
    6712              : 
    6713        95748 :     AcceptInvalidationMessages();   /* local processing of just-sent inval */
    6714              : 
    6715              :     /*
    6716              :      * Queue a transactional inval, for logical decoding and for third-party
    6717              :      * code that might have been relying on it since long before inplace
    6718              :      * update adopted immediate invalidation.  See README.tuplock section
    6719              :      * "Reading inplace-updated columns" for logical decoding details.
    6720              :      */
    6721        95748 :     if (!IsBootstrapProcessingMode())
    6722        79683 :         CacheInvalidateHeapTuple(relation, tuple, NULL);
    6723        95748 : }
    6724              : 
    6725              : /*
    6726              :  * heap_inplace_unlock - reverse of heap_inplace_lock
    6727              :  */
    6728              : void
    6729       118453 : heap_inplace_unlock(Relation relation,
    6730              :                     HeapTuple oldtup, Buffer buffer)
    6731              : {
    6732       118453 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    6733       118453 :     UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
    6734       118453 :     ForgetInplace_Inval();
    6735       118453 : }
    6736              : 
    6737              : #define     FRM_NOOP                0x0001
    6738              : #define     FRM_INVALIDATE_XMAX     0x0002
    6739              : #define     FRM_RETURN_IS_XID       0x0004
    6740              : #define     FRM_RETURN_IS_MULTI     0x0008
    6741              : #define     FRM_MARK_COMMITTED      0x0010
    6742              : 
    6743              : /*
    6744              :  * FreezeMultiXactId
    6745              :  *      Determine what to do during freezing when a tuple is marked by a
    6746              :  *      MultiXactId.
    6747              :  *
    6748              :  * "flags" is an output value; it's used to tell caller what to do on return.
    6749              :  * "pagefrz" is an input/output value, used to manage page level freezing.
    6750              :  *
    6751              :  * Possible values that we can set in "flags":
    6752              :  * FRM_NOOP
    6753              :  *      don't do anything -- keep existing Xmax
    6754              :  * FRM_INVALIDATE_XMAX
    6755              :  *      mark Xmax as InvalidTransactionId and set XMAX_INVALID flag.
    6756              :  * FRM_RETURN_IS_XID
    6757              :  *      The Xid return value is a single update Xid to set as xmax.
    6758              :  * FRM_MARK_COMMITTED
    6759              :  *      Xmax can be marked as HEAP_XMAX_COMMITTED
    6760              :  * FRM_RETURN_IS_MULTI
    6761              :  *      The return value is a new MultiXactId to set as new Xmax.
    6762              :  *      (caller must obtain proper infomask bits using GetMultiXactIdHintBits)
    6763              :  *
    6764              :  * Caller delegates control of page freezing to us.  In practice we always
    6765              :  * force freezing of caller's page unless FRM_NOOP processing is indicated.
    6766              :  * We help caller ensure that XIDs < FreezeLimit and MXIDs < MultiXactCutoff
    6767              :  * can never be left behind.  We freely choose when and how to process each
    6768              :  * Multi, without ever violating the cutoff postconditions for freezing.
    6769              :  *
    6770              :  * It's useful to remove Multis on a proactive timeline (relative to freezing
    6771              :  * XIDs) to keep MultiXact member SLRU buffer misses to a minimum.  It can also
    6772              :  * be cheaper in the short run, for us, since we too can avoid SLRU buffer
    6773              :  * misses through eager processing.
    6774              :  *
    6775              :  * NB: Creates a _new_ MultiXactId when FRM_RETURN_IS_MULTI is set, though only
    6776              :  * when FreezeLimit and/or MultiXactCutoff cutoffs leave us with no choice.
    6777              :  * This can usually be put off, which is usually enough to avoid it altogether.
    6778              :  * Allocating new multis during VACUUM should be avoided on general principle;
    6779              :  * only VACUUM can advance relminmxid, so allocating new Multis here comes with
    6780              :  * its own special risks.
    6781              :  *
    6782              :  * NB: Caller must maintain "no freeze" NewRelfrozenXid/NewRelminMxid trackers
    6783              :  * using heap_tuple_should_freeze when we haven't forced page-level freezing.
    6784              :  *
    6785              :  * NB: Caller should avoid needlessly calling heap_tuple_should_freeze when we
    6786              :  * have already forced page-level freezing, since that might incur the same
    6787              :  * SLRU buffer misses that we specifically intended to avoid by freezing.
    6788              :  */
static TransactionId
FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
                  const struct VacuumCutoffs *cutoffs, uint16 *flags,
                  HeapPageFreeze *pagefrz)
{
    TransactionId newxmax;
    MultiXactMember *members;
    int         nmembers;
    bool        need_replace;
    /* accumulators for the second pass over the multi's members */
    int         nnewmembers;
    MultiXactMember *newmembers;
    bool        has_lockers;
    TransactionId update_xid;
    bool        update_committed;
    /*
     * Tentative ratcheted-back tracker value; only written back into
     * pagefrz in the FRM_NOOP case below
     */
    TransactionId FreezePageRelfrozenXid;

    *flags = 0;

    /* We should only be called in Multis */
    Assert(t_infomask & HEAP_XMAX_IS_MULTI);

    /*
     * Xmax is invalid, or is an "upgraded" locked-only marker
     * (HEAP_LOCKED_UPGRADED): nothing worth keeping, so just invalidate
     * xmax and force freezing of the page
     */
    if (!MultiXactIdIsValid(multi) ||
        HEAP_LOCKED_UPGRADED(t_infomask))
    {
        *flags |= FRM_INVALIDATE_XMAX;
        pagefrz->freeze_required = true;
        return InvalidTransactionId;
    }
    else if (MultiXactIdPrecedes(multi, cutoffs->relminmxid))
        ereport(ERROR,
                (errcode(ERRCODE_DATA_CORRUPTED),
                 errmsg_internal("found multixact %u from before relminmxid %u",
                                 multi, cutoffs->relminmxid)));
    else if (MultiXactIdPrecedes(multi, cutoffs->OldestMxact))
    {
        TransactionId update_xact;

        /*
         * This old multi cannot possibly have members still running, but
         * verify just in case.  If it was a locker only, it can be removed
         * without any further consideration; but if it contained an update,
         * we might need to preserve it.
         */
        if (MultiXactIdIsRunning(multi,
                                 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
            ereport(ERROR,
                    (errcode(ERRCODE_DATA_CORRUPTED),
                     errmsg_internal("multixact %u from before multi freeze cutoff %u found to be still running",
                                     multi, cutoffs->OldestMxact)));

        if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
        {
            *flags |= FRM_INVALIDATE_XMAX;
            pagefrz->freeze_required = true;
            return InvalidTransactionId;
        }

        /* replace multi with single XID for its updater? */
        update_xact = MultiXactIdGetUpdateXid(multi, t_infomask);
        if (TransactionIdPrecedes(update_xact, cutoffs->relfrozenxid))
            ereport(ERROR,
                    (errcode(ERRCODE_DATA_CORRUPTED),
                     errmsg_internal("multixact %u contains update XID %u from before relfrozenxid %u",
                                     multi, update_xact,
                                     cutoffs->relfrozenxid)));
        else if (TransactionIdPrecedes(update_xact, cutoffs->OldestXmin))
        {
            /*
             * Updater XID has to have aborted (otherwise the tuple would have
             * been pruned away instead, since updater XID is < OldestXmin).
             * Just remove xmax.
             */
            if (TransactionIdDidCommit(update_xact))
                ereport(ERROR,
                        (errcode(ERRCODE_DATA_CORRUPTED),
                         errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
                                         multi, update_xact,
                                         cutoffs->OldestXmin)));
            *flags |= FRM_INVALIDATE_XMAX;
            pagefrz->freeze_required = true;
            return InvalidTransactionId;
        }

        /* Have to keep updater XID as new xmax */
        *flags |= FRM_RETURN_IS_XID;
        pagefrz->freeze_required = true;
        return update_xact;
    }

    /*
     * Some member(s) of this Multi may be below FreezeLimit xid cutoff, so we
     * need to walk the whole members array to figure out what to do, if
     * anything.
     */
    nmembers =
        GetMultiXactIdMembers(multi, &members, false,
                              HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
    if (nmembers <= 0)
    {
        /* Nothing worth keeping */
        *flags |= FRM_INVALIDATE_XMAX;
        pagefrz->freeze_required = true;
        return InvalidTransactionId;
    }

    /*
     * The FRM_NOOP case is the only case where we might need to ratchet back
     * FreezePageRelfrozenXid or FreezePageRelminMxid.  It is also the only
     * case where our caller might ratchet back its NoFreezePageRelfrozenXid
     * or NoFreezePageRelminMxid "no freeze" trackers to deal with a multi.
     * FRM_NOOP handling should result in the NewRelfrozenXid/NewRelminMxid
     * trackers managed by VACUUM being ratcheting back by xmax to the degree
     * required to make it safe to leave xmax undisturbed, independent of
     * whether or not page freezing is triggered somewhere else.
     *
     * Our policy is to force freezing in every case other than FRM_NOOP,
     * which obviates the need to maintain either set of trackers, anywhere.
     * Every other case will reliably execute a freeze plan for xmax that
     * either replaces xmax with an XID/MXID >= OldestXmin/OldestMxact, or
     * sets xmax to an InvalidTransactionId XID, rendering xmax fully frozen.
     * (VACUUM's NewRelfrozenXid/NewRelminMxid trackers are initialized with
     * OldestXmin/OldestMxact, so later values never need to be tracked here.)
     */
    need_replace = false;
    FreezePageRelfrozenXid = pagefrz->FreezePageRelfrozenXid;

    /*
     * First pass over the members: determine whether the multi must be
     * replaced (any member XID < FreezeLimit forces it), while tracking the
     * oldest member XID in case we're able to keep xmax via FRM_NOOP
     */
    for (int i = 0; i < nmembers; i++)
    {
        TransactionId xid = members[i].xid;

        Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));

        if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
        {
            /* Can't violate the FreezeLimit postcondition */
            need_replace = true;
            break;
        }
        if (TransactionIdPrecedes(xid, FreezePageRelfrozenXid))
            FreezePageRelfrozenXid = xid;
    }

    /* Can't violate the MultiXactCutoff postcondition, either */
    if (!need_replace)
        need_replace = MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff);

    if (!need_replace)
    {
        /*
         * vacuumlazy.c might ratchet back NewRelminMxid, NewRelfrozenXid, or
         * both together to make it safe to retain this particular multi after
         * freezing its page
         */
        *flags |= FRM_NOOP;
        pagefrz->FreezePageRelfrozenXid = FreezePageRelfrozenXid;
        if (MultiXactIdPrecedes(multi, pagefrz->FreezePageRelminMxid))
            pagefrz->FreezePageRelminMxid = multi;
        pfree(members);
        return multi;
    }

    /*
     * Do a more thorough second pass over the multi to figure out which
     * member XIDs actually need to be kept.  Checking the precise status of
     * individual members might even show that we don't need to keep anything.
     * That is quite possible even though the Multi must be >= OldestMxact,
     * since our second pass only keeps member XIDs when it's truly necessary;
     * even member XIDs >= OldestXmin often won't be kept by second pass.
     */
    nnewmembers = 0;
    newmembers = palloc_array(MultiXactMember, nmembers);
    has_lockers = false;
    update_xid = InvalidTransactionId;
    update_committed = false;

    /*
     * Determine whether to keep each member xid, or to ignore it instead
     */
    for (int i = 0; i < nmembers; i++)
    {
        TransactionId xid = members[i].xid;
        MultiXactStatus mstatus = members[i].status;

        Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));

        if (!ISUPDATE_from_mxstatus(mstatus))
        {
            /*
             * Locker XID (not updater XID).  We only keep lockers that are
             * still running.
             */
            if (TransactionIdIsCurrentTransactionId(xid) ||
                TransactionIdIsInProgress(xid))
            {
                if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
                    ereport(ERROR,
                            (errcode(ERRCODE_DATA_CORRUPTED),
                             errmsg_internal("multixact %u contains running locker XID %u from before removable cutoff %u",
                                             multi, xid,
                                             cutoffs->OldestXmin)));
                newmembers[nnewmembers++] = members[i];
                has_lockers = true;
            }

            continue;
        }

        /*
         * Updater XID (not locker XID).  Should we keep it?
         *
         * Since the tuple wasn't totally removed when vacuum pruned, the
         * update Xid cannot possibly be older than OldestXmin cutoff unless
         * the updater XID aborted.  If the updater transaction is known
         * aborted or crashed then it's okay to ignore it, otherwise not.
         *
         * In any case the Multi should never contain two updaters, whatever
         * their individual commit status.  Check for that first, in passing.
         */
        if (TransactionIdIsValid(update_xid))
            ereport(ERROR,
                    (errcode(ERRCODE_DATA_CORRUPTED),
                     errmsg_internal("multixact %u has two or more updating members",
                                     multi),
                     errdetail_internal("First updater XID=%u second updater XID=%u.",
                                        update_xid, xid)));

        /*
         * As with all tuple visibility routines, it's critical to test
         * TransactionIdIsInProgress before TransactionIdDidCommit, because of
         * race conditions explained in detail in heapam_visibility.c.
         */
        if (TransactionIdIsCurrentTransactionId(xid) ||
            TransactionIdIsInProgress(xid))
            update_xid = xid;
        else if (TransactionIdDidCommit(xid))
        {
            /*
             * The transaction committed, so we can tell caller to set
             * HEAP_XMAX_COMMITTED.  (We can only do this because we know the
             * transaction is not running.)
             */
            update_committed = true;
            update_xid = xid;
        }
        else
        {
            /*
             * Not in progress, not committed -- must be aborted or crashed;
             * we can ignore it.
             */
            continue;
        }

        /*
         * We determined that updater must be kept -- add it to pending new
         * members list
         */
        if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
            ereport(ERROR,
                    (errcode(ERRCODE_DATA_CORRUPTED),
                     errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
                                     multi, xid, cutoffs->OldestXmin)));
        newmembers[nnewmembers++] = members[i];
    }

    pfree(members);

    /*
     * Determine what to do with caller's multi based on information gathered
     * during our second pass
     */
    if (nnewmembers == 0)
    {
        /* Nothing worth keeping */
        *flags |= FRM_INVALIDATE_XMAX;
        newxmax = InvalidTransactionId;
    }
    else if (TransactionIdIsValid(update_xid) && !has_lockers)
    {
        /*
         * If there's a single member and it's an update, pass it back alone
         * without creating a new Multi.  (XXX we could do this when there's a
         * single remaining locker, too, but that would complicate the API too
         * much; moreover, the case with the single updater is more
         * interesting, because those are longer-lived.)
         */
        Assert(nnewmembers == 1);
        *flags |= FRM_RETURN_IS_XID;
        if (update_committed)
            *flags |= FRM_MARK_COMMITTED;
        newxmax = update_xid;
    }
    else
    {
        /*
         * Create a new multixact with the surviving members of the previous
         * one, to set as new Xmax in the tuple
         */
        newxmax = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
        *flags |= FRM_RETURN_IS_MULTI;
    }

    pfree(newmembers);

    /* Every non-FRM_NOOP path forces page-level freezing (see policy above) */
    pagefrz->freeze_required = true;
    return newxmax;
}
    7095              : 
    7096              : /*
    7097              :  * heap_prepare_freeze_tuple
    7098              :  *
    7099              :  * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
    7100              :  * are older than the OldestXmin and/or OldestMxact freeze cutoffs.  If so,
    7101              :  * setup enough state (in the *frz output argument) to enable caller to
    7102              :  * process this tuple as part of freezing its page, and return true.  Return
    7103              :  * false if nothing can be changed about the tuple right now.
    7104              :  *
    7105              :  * FreezePageConflictXid is advanced only for xmin/xvac freezing, not for xmax
    7106              :  * changes. We only remove xmax state here when it is lock-only, or when the
    7107              :  * updater XID (including an updater member of a MultiXact) must be aborted;
    7108              :  * otherwise, the tuple would already be removable. Neither case affects
    7109              :  * visibility on a standby.
    7110              :  *
    7111              :  * Also sets *totally_frozen to true if the tuple will be totally frozen once
    7112              :  * caller executes returned freeze plan (or if the tuple was already totally
    7113              :  * frozen by an earlier VACUUM).  This indicates that there are no remaining
    7114              :  * XIDs or MultiXactIds that will need to be processed by a future VACUUM.
    7115              :  *
    7116              :  * VACUUM caller must assemble HeapTupleFreeze freeze plan entries for every
    7117              :  * tuple that we returned true for, and then execute freezing.  Caller must
    7118              :  * initialize pagefrz fields for page as a whole before first call here for
    7119              :  * each heap page.
    7120              :  *
    7121              :  * VACUUM caller decides on whether or not to freeze the page as a whole.
    7122              :  * We'll often prepare freeze plans for a page that caller just discards.
    7123              :  * However, VACUUM doesn't always get to make a choice; it must freeze when
    7124              :  * pagefrz.freeze_required is set, to ensure that any XIDs < FreezeLimit (and
    7125              :  * MXIDs < MultiXactCutoff) can never be left behind.  We help to make sure
    7126              :  * that VACUUM always follows that rule.
    7127              :  *
    7128              :  * We sometimes force freezing of xmax MultiXactId values long before it is
    7129              :  * strictly necessary to do so just to ensure the FreezeLimit postcondition.
    7130              :  * It's worth processing MultiXactIds proactively when it is cheap to do so,
    7131              :  * and it's convenient to make that happen by piggy-backing it on the "force
    7132              :  * freezing" mechanism.  Conversely, we sometimes delay freezing MultiXactIds
    7133              :  * because it is expensive right now (though only when it's still possible to
    7134              :  * do so without violating the FreezeLimit/MultiXactCutoff postcondition).
    7135              :  *
    7136              :  * It is assumed that the caller has checked the tuple with
    7137              :  * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
    7138              :  * (else we should be removing the tuple, not freezing it).
    7139              :  *
    7140              :  * NB: This function has side effects: it might allocate a new MultiXactId.
    7141              :  * It will be set as tuple's new xmax when our *frz output is processed within
    7142              :  * heap_execute_freeze_tuple later on.  If the tuple is in a shared buffer
    7143              :  * then caller had better have an exclusive lock on it already.
    7144              :  */
    7145              : bool
    7146     10354946 : heap_prepare_freeze_tuple(HeapTupleHeader tuple,
    7147              :                           const struct VacuumCutoffs *cutoffs,
    7148              :                           HeapPageFreeze *pagefrz,
    7149              :                           HeapTupleFreeze *frz, bool *totally_frozen)
    7150              : {
    7151     10354946 :     bool        xmin_already_frozen = false,
    7152     10354946 :                 xmax_already_frozen = false;
    7153     10354946 :     bool        freeze_xmin = false,
    7154     10354946 :                 replace_xvac = false,
    7155     10354946 :                 replace_xmax = false,
    7156     10354946 :                 freeze_xmax = false;
    7157              :     TransactionId xid;
    7158              : 
    7159     10354946 :     frz->xmax = HeapTupleHeaderGetRawXmax(tuple);
    7160     10354946 :     frz->t_infomask2 = tuple->t_infomask2;
    7161     10354946 :     frz->t_infomask = tuple->t_infomask;
    7162     10354946 :     frz->frzflags = 0;
    7163     10354946 :     frz->checkflags = 0;
    7164              : 
    7165              :     /*
    7166              :      * Process xmin, while keeping track of whether it's already frozen, or
    7167              :      * will become frozen iff our freeze plan is executed by caller (could be
    7168              :      * neither).
    7169              :      */
    7170     10354946 :     xid = HeapTupleHeaderGetXmin(tuple);
    7171     10354946 :     if (!TransactionIdIsNormal(xid))
    7172      6411917 :         xmin_already_frozen = true;
    7173              :     else
    7174              :     {
    7175      3943029 :         if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
    7176            0 :             ereport(ERROR,
    7177              :                     (errcode(ERRCODE_DATA_CORRUPTED),
    7178              :                      errmsg_internal("found xmin %u from before relfrozenxid %u",
    7179              :                                      xid, cutoffs->relfrozenxid)));
    7180              : 
    7181              :         /* Will set freeze_xmin flags in freeze plan below */
    7182      3943029 :         freeze_xmin = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
    7183              : 
    7184              :         /* Verify that xmin committed if and when freeze plan is executed */
    7185      3943029 :         if (freeze_xmin)
    7186              :         {
    7187      3149178 :             frz->checkflags |= HEAP_FREEZE_CHECK_XMIN_COMMITTED;
    7188      3149178 :             if (TransactionIdFollows(xid, pagefrz->FreezePageConflictXid))
    7189       487530 :                 pagefrz->FreezePageConflictXid = xid;
    7190              :         }
    7191              :     }
    7192              : 
    7193              :     /*
    7194              :      * Old-style VACUUM FULL is gone, but we have to process xvac for as long
    7195              :      * as we support having MOVED_OFF/MOVED_IN tuples in the database
    7196              :      */
    7197     10354946 :     xid = HeapTupleHeaderGetXvac(tuple);
    7198     10354946 :     if (TransactionIdIsNormal(xid))
    7199              :     {
    7200              :         Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
    7201              :         Assert(TransactionIdPrecedes(xid, cutoffs->OldestXmin));
    7202              : 
    7203              :         /*
    7204              :          * For Xvac, we always freeze proactively.  This allows totally_frozen
    7205              :          * tracking to ignore xvac.
    7206              :          */
    7207            0 :         replace_xvac = pagefrz->freeze_required = true;
    7208              : 
    7209            0 :         if (TransactionIdFollows(xid, pagefrz->FreezePageConflictXid))
    7210            0 :             pagefrz->FreezePageConflictXid = xid;
    7211              : 
    7212              :         /* Will set replace_xvac flags in freeze plan below */
    7213              :     }
    7214              : 
    7215              :     /* Now process xmax */
    7216     10354946 :     xid = frz->xmax;
    7217     10354946 :     if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
    7218              :     {
    7219              :         /* Raw xmax is a MultiXactId */
    7220              :         TransactionId newxmax;
    7221              :         uint16      flags;
    7222              : 
    7223              :         /*
    7224              :          * We will either remove xmax completely (in the "freeze_xmax" path),
    7225              :          * process xmax by replacing it (in the "replace_xmax" path), or
    7226              :          * perform no-op xmax processing.  The only constraint is that the
    7227              :          * FreezeLimit/MultiXactCutoff postcondition must never be violated.
    7228              :          */
    7229            6 :         newxmax = FreezeMultiXactId(xid, tuple->t_infomask, cutoffs,
    7230              :                                     &flags, pagefrz);
    7231              : 
    7232            6 :         if (flags & FRM_NOOP)
    7233              :         {
    7234              :             /*
    7235              :              * xmax is a MultiXactId, and nothing about it changes for now.
    7236              :              * This is the only case where 'freeze_required' won't have been
    7237              :              * set for us by FreezeMultiXactId, as well as the only case where
    7238              :              * neither freeze_xmax nor replace_xmax are set (given a multi).
    7239              :              *
    7240              :              * This is a no-op, but the call to FreezeMultiXactId might have
    7241              :              * ratcheted back NewRelfrozenXid and/or NewRelminMxid trackers
    7242              :              * for us (the "freeze page" variants, specifically).  That'll
    7243              :              * make it safe for our caller to freeze the page later on, while
    7244              :              * leaving this particular xmax undisturbed.
    7245              :              *
    7246              :              * FreezeMultiXactId is _not_ responsible for the "no freeze"
    7247              :              * NewRelfrozenXid/NewRelminMxid trackers, though -- that's our
    7248              :              * job.  A call to heap_tuple_should_freeze for this same tuple
    7249              :              * will take place below if 'freeze_required' isn't set already.
    7250              :              * (This repeats work from FreezeMultiXactId, but allows "no
    7251              :              * freeze" tracker maintenance to happen in only one place.)
    7252              :              */
    7253              :             Assert(!MultiXactIdPrecedes(newxmax, cutoffs->MultiXactCutoff));
    7254              :             Assert(MultiXactIdIsValid(newxmax) && xid == newxmax);
    7255              :         }
    7256            5 :         else if (flags & FRM_RETURN_IS_XID)
    7257              :         {
    7258              :             /*
    7259              :              * xmax will become an updater Xid (original MultiXact's updater
    7260              :              * member Xid will be carried forward as a simple Xid in Xmax).
    7261              :              */
    7262              :             Assert(!TransactionIdPrecedes(newxmax, cutoffs->OldestXmin));
    7263              : 
    7264              :             /*
    7265              :              * NB -- some of these transformations are only valid because we
    7266              :              * know the return Xid is a tuple updater (i.e. not merely a
    7267              :              * locker.) Also note that the only reason we don't explicitly
    7268              :              * worry about HEAP_KEYS_UPDATED is because it lives in
    7269              :              * t_infomask2 rather than t_infomask.
    7270              :              */
    7271            0 :             frz->t_infomask &= ~HEAP_XMAX_BITS;
    7272            0 :             frz->xmax = newxmax;
    7273            0 :             if (flags & FRM_MARK_COMMITTED)
    7274            0 :                 frz->t_infomask |= HEAP_XMAX_COMMITTED;
    7275            0 :             replace_xmax = true;
    7276              :         }
    7277            5 :         else if (flags & FRM_RETURN_IS_MULTI)
    7278              :         {
    7279              :             uint16      newbits;
    7280              :             uint16      newbits2;
    7281              : 
    7282              :             /*
    7283              :              * xmax is an old MultiXactId that we have to replace with a new
    7284              :              * MultiXactId, to carry forward two or more original member XIDs.
    7285              :              */
    7286              :             Assert(!MultiXactIdPrecedes(newxmax, cutoffs->OldestMxact));
    7287              : 
    7288              :             /*
    7289              :              * We can't use GetMultiXactIdHintBits directly on the new multi
    7290              :              * here; that routine initializes the masks to all zeroes, which
    7291              :              * would lose other bits we need.  Doing it this way ensures all
    7292              :              * unrelated bits remain untouched.
    7293              :              */
    7294            1 :             frz->t_infomask &= ~HEAP_XMAX_BITS;
    7295            1 :             frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    7296            1 :             GetMultiXactIdHintBits(newxmax, &newbits, &newbits2);
    7297            1 :             frz->t_infomask |= newbits;
    7298            1 :             frz->t_infomask2 |= newbits2;
    7299            1 :             frz->xmax = newxmax;
    7300            1 :             replace_xmax = true;
    7301              :         }
    7302              :         else
    7303              :         {
    7304              :             /*
    7305              :              * Freeze plan for tuple "freezes xmax" in the strictest sense:
    7306              :              * it'll leave nothing in xmax (neither an Xid nor a MultiXactId).
    7307              :              */
    7308              :             Assert(flags & FRM_INVALIDATE_XMAX);
    7309              :             Assert(!TransactionIdIsValid(newxmax));
    7310              : 
    7311              :             /* Will set freeze_xmax flags in freeze plan below */
    7312            4 :             freeze_xmax = true;
    7313              :         }
    7314              : 
    7315              :         /* MultiXactId processing forces freezing (barring FRM_NOOP case) */
    7316              :         Assert(pagefrz->freeze_required || (!freeze_xmax && !replace_xmax));
    7317              :     }
    7318     10354940 :     else if (TransactionIdIsNormal(xid))
    7319              :     {
    7320              :         /* Raw xmax is normal XID */
    7321      4420222 :         if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
    7322            0 :             ereport(ERROR,
    7323              :                     (errcode(ERRCODE_DATA_CORRUPTED),
    7324              :                      errmsg_internal("found xmax %u from before relfrozenxid %u",
    7325              :                                      xid, cutoffs->relfrozenxid)));
    7326              : 
    7327              :         /* Will set freeze_xmax flags in freeze plan below */
    7328      4420222 :         freeze_xmax = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
    7329              : 
    7330              :         /*
    7331              :          * Verify that xmax aborted if and when freeze plan is executed,
    7332              :          * provided it's from an update. (A lock-only xmax can be removed
    7333              :          * independent of this, since the lock is released at xact end.)
    7334              :          */
    7335      4420222 :         if (freeze_xmax && !HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
    7336         1427 :             frz->checkflags |= HEAP_FREEZE_CHECK_XMAX_ABORTED;
    7337              :     }
    7338      5934718 :     else if (!TransactionIdIsValid(xid))
    7339              :     {
    7340              :         /* Raw xmax is InvalidTransactionId XID */
    7341              :         Assert((tuple->t_infomask & HEAP_XMAX_IS_MULTI) == 0);
    7342      5934718 :         xmax_already_frozen = true;
    7343              :     }
    7344              :     else
    7345            0 :         ereport(ERROR,
    7346              :                 (errcode(ERRCODE_DATA_CORRUPTED),
    7347              :                  errmsg_internal("found raw xmax %u (infomask 0x%04x) not invalid and not multi",
    7348              :                                  xid, tuple->t_infomask)));
    7349              : 
    7350     10354946 :     if (freeze_xmin)
    7351              :     {
    7352              :         Assert(!xmin_already_frozen);
    7353              : 
    7354      3149178 :         frz->t_infomask |= HEAP_XMIN_FROZEN;
    7355              :     }
    7356     10354946 :     if (replace_xvac)
    7357              :     {
    7358              :         /*
    7359              :          * If a MOVED_OFF tuple is not dead, the xvac transaction must have
    7360              :          * failed; whereas a non-dead MOVED_IN tuple must mean the xvac
    7361              :          * transaction succeeded.
    7362              :          */
    7363              :         Assert(pagefrz->freeze_required);
    7364            0 :         if (tuple->t_infomask & HEAP_MOVED_OFF)
    7365            0 :             frz->frzflags |= XLH_INVALID_XVAC;
    7366              :         else
    7367            0 :             frz->frzflags |= XLH_FREEZE_XVAC;
    7368              :     }
    7369              :     if (replace_xmax)
    7370              :     {
    7371              :         Assert(!xmax_already_frozen && !freeze_xmax);
    7372              :         Assert(pagefrz->freeze_required);
    7373              : 
    7374              :         /* Already set replace_xmax flags in freeze plan earlier */
    7375              :     }
    7376     10354946 :     if (freeze_xmax)
    7377              :     {
    7378              :         Assert(!xmax_already_frozen && !replace_xmax);
    7379              : 
    7380         2413 :         frz->xmax = InvalidTransactionId;
    7381              : 
    7382              :         /*
    7383              :          * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
    7384              :          * LOCKED.  Normalize to INVALID just to be sure no one gets confused.
    7385              :          * Also get rid of the HEAP_KEYS_UPDATED bit.
    7386              :          */
    7387         2413 :         frz->t_infomask &= ~HEAP_XMAX_BITS;
    7388         2413 :         frz->t_infomask |= HEAP_XMAX_INVALID;
    7389         2413 :         frz->t_infomask2 &= ~HEAP_HOT_UPDATED;
    7390         2413 :         frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    7391              :     }
    7392              : 
    7393              :     /*
    7394              :      * Determine if this tuple is already totally frozen, or will become
    7395              :      * totally frozen (provided caller executes freeze plans for the page)
    7396              :      */
    7397     19913628 :     *totally_frozen = ((freeze_xmin || xmin_already_frozen) &&
    7398      9558682 :                        (freeze_xmax || xmax_already_frozen));
    7399              : 
    7400     10354946 :     if (!pagefrz->freeze_required && !(xmin_already_frozen &&
    7401              :                                        xmax_already_frozen))
    7402              :     {
    7403              :         /*
    7404              :          * So far no previous tuple from the page made freezing mandatory.
    7405              :          * Does this tuple force caller to freeze the entire page?
    7406              :          */
    7407      6572494 :         pagefrz->freeze_required =
    7408      6572494 :             heap_tuple_should_freeze(tuple, cutoffs,
    7409              :                                      &pagefrz->NoFreezePageRelfrozenXid,
    7410              :                                      &pagefrz->NoFreezePageRelminMxid);
    7411              :     }
    7412              : 
    7413              :     /* Tell caller if this tuple has a usable freeze plan set in *frz */
    7414     10354946 :     return freeze_xmin || replace_xvac || replace_xmax || freeze_xmax;
    7415              : }
    7416              : 
    7417              : /*
    7418              :  * Perform xmin/xmax XID status sanity checks before actually executing freeze
    7419              :  * plans.
    7420              :  *
    7421              :  * heap_prepare_freeze_tuple doesn't perform these checks directly because
    7422              :  * pg_xact lookups are relatively expensive.  They shouldn't be repeated by
    7423              :  * successive VACUUMs that each decide against freezing the same page.
    7424              :  */
    7425              : void
    7426        23996 : heap_pre_freeze_checks(Buffer buffer,
    7427              :                        HeapTupleFreeze *tuples, int ntuples)
    7428              : {
    7429        23996 :     Page        page = BufferGetPage(buffer);
    7430              : 
    7431      1092419 :     for (int i = 0; i < ntuples; i++)
    7432              :     {
    7433      1068423 :         HeapTupleFreeze *frz = tuples + i;
    7434      1068423 :         ItemId      itemid = PageGetItemId(page, frz->offset);
    7435              :         HeapTupleHeader htup;
    7436              : 
    7437      1068423 :         htup = (HeapTupleHeader) PageGetItem(page, itemid);
    7438              : 
    7439              :         /* Deliberately avoid relying on tuple hint bits here */
    7440      1068423 :         if (frz->checkflags & HEAP_FREEZE_CHECK_XMIN_COMMITTED)
    7441              :         {
    7442      1068422 :             TransactionId xmin = HeapTupleHeaderGetRawXmin(htup);
    7443              : 
    7444              :             Assert(!HeapTupleHeaderXminFrozen(htup));
    7445      1068422 :             if (unlikely(!TransactionIdDidCommit(xmin)))
    7446            0 :                 ereport(ERROR,
    7447              :                         (errcode(ERRCODE_DATA_CORRUPTED),
    7448              :                          errmsg_internal("uncommitted xmin %u needs to be frozen",
    7449              :                                          xmin)));
    7450              :         }
    7451              : 
    7452              :         /*
    7453              :          * TransactionIdDidAbort won't work reliably in the presence of XIDs
    7454              :          * left behind by transactions that were in progress during a crash,
    7455              :          * so we can only check that xmax didn't commit
    7456              :          */
    7457      1068423 :         if (frz->checkflags & HEAP_FREEZE_CHECK_XMAX_ABORTED)
    7458              :         {
    7459          414 :             TransactionId xmax = HeapTupleHeaderGetRawXmax(htup);
    7460              : 
    7461              :             Assert(TransactionIdIsNormal(xmax));
    7462          414 :             if (unlikely(TransactionIdDidCommit(xmax)))
    7463            0 :                 ereport(ERROR,
    7464              :                         (errcode(ERRCODE_DATA_CORRUPTED),
    7465              :                          errmsg_internal("cannot freeze committed xmax %u",
    7466              :                                          xmax)));
    7467              :         }
    7468              :     }
    7469        23996 : }
    7470              : 
    7471              : /*
    7472              :  * Helper which executes freezing of one or more heap tuples on a page on
    7473              :  * behalf of caller.  Caller passes an array of tuple plans from
    7474              :  * heap_prepare_freeze_tuple.  Caller must set 'offset' in each plan for us.
    7475              :  * Must be called in a critical section that also marks the buffer dirty and,
    7476              :  * if needed, emits WAL.
    7477              :  */
    7478              : void
    7479        23996 : heap_freeze_prepared_tuples(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
    7480              : {
    7481        23996 :     Page        page = BufferGetPage(buffer);
    7482              : 
    7483      1092419 :     for (int i = 0; i < ntuples; i++)
    7484              :     {
    7485      1068423 :         HeapTupleFreeze *frz = tuples + i;
    7486      1068423 :         ItemId      itemid = PageGetItemId(page, frz->offset);
    7487              :         HeapTupleHeader htup;
    7488              : 
    7489      1068423 :         htup = (HeapTupleHeader) PageGetItem(page, itemid);
    7490      1068423 :         heap_execute_freeze_tuple(htup, frz);
    7491              :     }
    7492        23996 : }
    7493              : 
    7494              : /*
    7495              :  * heap_freeze_tuple
    7496              :  *      Freeze tuple in place, without WAL logging.
    7497              :  *
    7498              :  * Useful for callers like CLUSTER that perform their own WAL logging.
    7499              :  */
    7500              : bool
    7501       466739 : heap_freeze_tuple(HeapTupleHeader tuple,
    7502              :                   TransactionId relfrozenxid, TransactionId relminmxid,
    7503              :                   TransactionId FreezeLimit, TransactionId MultiXactCutoff)
    7504              : {
    7505              :     HeapTupleFreeze frz;
    7506              :     bool        do_freeze;
    7507              :     bool        totally_frozen;
    7508              :     struct VacuumCutoffs cutoffs;
    7509              :     HeapPageFreeze pagefrz;
    7510              : 
    7511       466739 :     cutoffs.relfrozenxid = relfrozenxid;
    7512       466739 :     cutoffs.relminmxid = relminmxid;
    7513       466739 :     cutoffs.OldestXmin = FreezeLimit;
    7514       466739 :     cutoffs.OldestMxact = MultiXactCutoff;
    7515       466739 :     cutoffs.FreezeLimit = FreezeLimit;
    7516       466739 :     cutoffs.MultiXactCutoff = MultiXactCutoff;
    7517              : 
    7518       466739 :     pagefrz.freeze_required = true;
    7519       466739 :     pagefrz.FreezePageRelfrozenXid = FreezeLimit;
    7520       466739 :     pagefrz.FreezePageRelminMxid = MultiXactCutoff;
    7521       466739 :     pagefrz.FreezePageConflictXid = InvalidTransactionId;
    7522       466739 :     pagefrz.NoFreezePageRelfrozenXid = FreezeLimit;
    7523       466739 :     pagefrz.NoFreezePageRelminMxid = MultiXactCutoff;
    7524              : 
    7525       466739 :     do_freeze = heap_prepare_freeze_tuple(tuple, &cutoffs,
    7526              :                                           &pagefrz, &frz, &totally_frozen);
    7527              : 
    7528              :     /*
    7529              :      * Note that because this is not a WAL-logged operation, we don't need to
    7530              :      * fill in the offset in the freeze record.
    7531              :      */
    7532              : 
    7533       466739 :     if (do_freeze)
    7534       361791 :         heap_execute_freeze_tuple(tuple, &frz);
    7535       466739 :     return do_freeze;
    7536              : }
    7537              : 
    7538              : /*
    7539              :  * For a given MultiXactId, return the hint bits that should be set in the
    7540              :  * tuple's infomask.
    7541              :  *
    7542              :  * Normally this should be called for a multixact that was just created, and
    7543              :  * so is on our local cache, so the GetMembers call is fast.
    7544              :  */
    7545              : static void
    7546        76862 : GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
    7547              :                        uint16 *new_infomask2)
    7548              : {
    7549              :     int         nmembers;
    7550              :     MultiXactMember *members;
    7551              :     int         i;
    7552        76862 :     uint16      bits = HEAP_XMAX_IS_MULTI;
    7553        76862 :     uint16      bits2 = 0;
    7554        76862 :     bool        has_update = false;
    7555        76862 :     LockTupleMode strongest = LockTupleKeyShare;
    7556              : 
    7557              :     /*
    7558              :      * We only use this in multis we just created, so they cannot be values
    7559              :      * pre-pg_upgrade.
    7560              :      */
    7561        76862 :     nmembers = GetMultiXactIdMembers(multi, &members, false, false);
    7562              : 
    7563      1472794 :     for (i = 0; i < nmembers; i++)
    7564              :     {
    7565              :         LockTupleMode mode;
    7566              : 
    7567              :         /*
    7568              :          * Remember the strongest lock mode held by any member of the
    7569              :          * multixact.
    7570              :          */
    7571      1395932 :         mode = TUPLOCK_from_mxstatus(members[i].status);
    7572      1395932 :         if (mode > strongest)
    7573         2924 :             strongest = mode;
    7574              : 
    7575              :         /* See what other bits we need */
    7576      1395932 :         switch (members[i].status)
    7577              :         {
    7578      1393512 :             case MultiXactStatusForKeyShare:
    7579              :             case MultiXactStatusForShare:
    7580              :             case MultiXactStatusForNoKeyUpdate:
    7581      1393512 :                 break;
    7582              : 
    7583           53 :             case MultiXactStatusForUpdate:
    7584           53 :                 bits2 |= HEAP_KEYS_UPDATED;
    7585           53 :                 break;
    7586              : 
    7587         2357 :             case MultiXactStatusNoKeyUpdate:
    7588         2357 :                 has_update = true;
    7589         2357 :                 break;
    7590              : 
    7591           10 :             case MultiXactStatusUpdate:
    7592           10 :                 bits2 |= HEAP_KEYS_UPDATED;
    7593           10 :                 has_update = true;
    7594           10 :                 break;
    7595              :         }
    7596              :     }
    7597              : 
    7598        76862 :     if (strongest == LockTupleExclusive ||
    7599              :         strongest == LockTupleNoKeyExclusive)
    7600         2448 :         bits |= HEAP_XMAX_EXCL_LOCK;
    7601        74414 :     else if (strongest == LockTupleShare)
    7602          473 :         bits |= HEAP_XMAX_SHR_LOCK;
    7603        73941 :     else if (strongest == LockTupleKeyShare)
    7604        73941 :         bits |= HEAP_XMAX_KEYSHR_LOCK;
    7605              : 
    7606        76862 :     if (!has_update)
    7607        74495 :         bits |= HEAP_XMAX_LOCK_ONLY;
    7608              : 
    7609        76862 :     if (nmembers > 0)
    7610        76862 :         pfree(members);
    7611              : 
    7612        76862 :     *new_infomask = bits;
    7613        76862 :     *new_infomask2 = bits2;
    7614        76862 : }
    7615              : 
/*
 * MultiXactIdGetUpdateXid
 *
 * Given a multixact Xmax and corresponding infomask, which does not have the
 * HEAP_XMAX_LOCK_ONLY bit set, obtain and return the Xid of the updating
 * transaction.
 *
 * Caller is expected to check the status of the updating transaction, if
 * necessary.
 *
 * Returns InvalidTransactionId if the multi has no updater member (all
 * members are mere lockers).
 */
static TransactionId
MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
{
    TransactionId update_xact = InvalidTransactionId;
    MultiXactMember *members;
    int         nmembers;

    Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
    Assert(t_infomask & HEAP_XMAX_IS_MULTI);

    /*
     * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
     * pre-pg_upgrade.
     */
    nmembers = GetMultiXactIdMembers(xmax, &members, false, false);

    if (nmembers > 0)
    {
        int         i;

        for (i = 0; i < nmembers; i++)
        {
            /* Ignore lockers */
            if (!ISUPDATE_from_mxstatus(members[i].status))
                continue;

            /* there can be at most one updater */
            Assert(update_xact == InvalidTransactionId);
            update_xact = members[i].xid;
#ifndef USE_ASSERT_CHECKING

            /*
             * In a non-assert build, stop at the first updater.  In an
             * assert-enabled build we instead keep scanning the whole array,
             * so the Assert above can verify there is no second updater.
             */
            break;
#endif
        }

        pfree(members);
    }

    return update_xact;
}
    7670              : 
    7671              : /*
    7672              :  * HeapTupleGetUpdateXid
    7673              :  *      As above, but use a HeapTupleHeader
    7674              :  *
    7675              :  * See also HeapTupleHeaderGetUpdateXid, which can be used without previously
    7676              :  * checking the hint bits.
    7677              :  */
    7678              : TransactionId
    7679       159950 : HeapTupleGetUpdateXid(const HeapTupleHeaderData *tup)
    7680              : {
    7681       159950 :     return MultiXactIdGetUpdateXid(HeapTupleHeaderGetRawXmax(tup),
    7682       159950 :                                    tup->t_infomask);
    7683              : }
    7684              : 
/*
 * Does the given multixact conflict with the current transaction grabbing a
 * tuple lock of the given strength?
 *
 * The passed infomask pairs up with the given multixact in the tuple header.
 *
 * If current_is_member is not NULL, it is set to 'true' if the current
 * transaction is a member of the given multixact.
 */
static bool
DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
                        LockTupleMode lockmode, bool *current_is_member)
{
    int         nmembers;
    MultiXactMember *members;
    bool        result = false;
    LOCKMODE    wanted = tupleLockExtraInfo[lockmode].hwlock;

    /* Pre-pg_upgrade multis have no extant members, so no conflict */
    if (HEAP_LOCKED_UPGRADED(infomask))
        return false;

    nmembers = GetMultiXactIdMembers(multi, &members, false,
                                     HEAP_XMAX_IS_LOCKED_ONLY(infomask));
    if (nmembers >= 0)
    {
        int         i;

        for (i = 0; i < nmembers; i++)
        {
            TransactionId memxid;
            LOCKMODE    memlockmode;

            /*
             * Once a conflict is found, keep scanning only if the caller
             * still needs to learn whether we are a member ourselves.
             */
            if (result && (current_is_member == NULL || *current_is_member))
                break;

            memlockmode = LOCKMODE_from_mxstatus(members[i].status);

            /* ignore members from current xact (but track their presence) */
            memxid = members[i].xid;
            if (TransactionIdIsCurrentTransactionId(memxid))
            {
                if (current_is_member != NULL)
                    *current_is_member = true;
                continue;
            }
            else if (result)
                continue;

            /* ignore members that don't conflict with the lock we want */
            if (!DoLockModesConflict(memlockmode, wanted))
                continue;

            if (ISUPDATE_from_mxstatus(members[i].status))
            {
                /* ignore aborted updaters */
                if (TransactionIdDidAbort(memxid))
                    continue;
            }
            else
            {
                /* ignore lockers-only that are no longer in progress */
                if (!TransactionIdIsInProgress(memxid))
                    continue;
            }

            /*
             * Whatever remains are either live lockers that conflict with our
             * wanted lock, and updaters that are not aborted.  Those conflict
             * with what we want.  Set up to return true, but keep going to
             * look for the current transaction among the multixact members,
             * if needed.
             */
            result = true;
        }
        pfree(members);
    }

    return result;
}
    7764              : 
    7765              : /*
    7766              :  * Do_MultiXactIdWait
    7767              :  *      Actual implementation for the two functions below.
    7768              :  *
    7769              :  * 'multi', 'status' and 'infomask' indicate what to sleep on (the status is
    7770              :  * needed to ensure we only sleep on conflicting members, and the infomask is
    7771              :  * used to optimize multixact access in case it's a lock-only multi); 'nowait'
    7772              :  * indicates whether to use conditional lock acquisition, to allow callers to
    7773              :  * fail if lock is unavailable.  'rel', 'ctid' and 'oper' are used to set up
    7774              :  * context information for error messages.  'remaining', if not NULL, receives
    7775              :  * the number of members that are still running, including any (non-aborted)
    7776              :  * subtransactions of our own transaction.  'logLockFailure' indicates whether
    7777              :  * to log details when a lock acquisition fails with 'nowait' enabled.
    7778              :  *
    7779              :  * We do this by sleeping on each member using XactLockTableWait.  Any
    7780              :  * members that belong to the current backend are *not* waited for, however;
    7781              :  * this would not merely be useless but would lead to Assert failure inside
    7782              :  * XactLockTableWait.  By the time this returns, it is certain that all
    7783              :  * transactions *of other backends* that were members of the MultiXactId
    7784              :  * that conflict with the requested status are dead (and no new ones can have
    7785              :  * been added, since it is not legal to add members to an existing
    7786              :  * MultiXactId).
    7787              :  *
    7788              :  * But by the time we finish sleeping, someone else may have changed the Xmax
    7789              :  * of the containing tuple, so the caller needs to iterate on us somehow.
    7790              :  *
    7791              :  * Note that in case we return false, the number of remaining members is
    7792              :  * not to be trusted.
    7793              :  */
static bool
Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
                   uint16 infomask, bool nowait,
                   Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
                   int *remaining, bool logLockFailure)
{
    bool        result = true;
    MultiXactMember *members;
    int         nmembers;
    int         remain = 0;     /* count of members still considered live */

    /* for pre-pg_upgrade tuples, no need to sleep at all */
    nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
        GetMultiXactIdMembers(multi, &members, false,
                              HEAP_XMAX_IS_LOCKED_ONLY(infomask));

    if (nmembers >= 0)
    {
        int         i;

        for (i = 0; i < nmembers; i++)
        {
            TransactionId memxid = members[i].xid;
            MultiXactStatus memstatus = members[i].status;

            /*
             * Never wait on members belonging to our own backend; doing so
             * would trigger an Assert inside XactLockTableWait.  They do
             * count toward the "remaining" total, though.
             */
            if (TransactionIdIsCurrentTransactionId(memxid))
            {
                remain++;
                continue;
            }

            /*
             * A member whose lock mode doesn't conflict with the requested
             * status needn't be waited for; just count it as remaining if
             * it's still in progress and the caller asked for the count.
             */
            if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
                                     LOCKMODE_from_mxstatus(status)))
            {
                if (remaining && TransactionIdIsInProgress(memxid))
                    remain++;
                continue;
            }

            /*
             * This member conflicts with our multi, so we have to sleep (or
             * return failure, if asked to avoid waiting).
             *
             * Note that we don't set up an error context callback ourselves,
             * but instead we pass the info down to XactLockTableWait.  This
             * might seem a bit wasteful because the context is set up and
             * torn down for each member of the multixact, but in reality it
             * should be barely noticeable, and it avoids duplicate code.
             */
            if (nowait)
            {
                result = ConditionalXactLockTableWait(memxid, logLockFailure);
                if (!result)
                    break;      /* couldn't acquire; remain is untrustworthy */
            }
            else
                XactLockTableWait(memxid, rel, ctid, oper);
        }

        pfree(members);
    }

    /* Report number of still-running members, if the caller wants it */
    if (remaining)
        *remaining = remain;

    return result;
}
    7861              : 
    7862              : /*
    7863              :  * MultiXactIdWait
    7864              :  *      Sleep on a MultiXactId.
    7865              :  *
    7866              :  * By the time we finish sleeping, someone else may have changed the Xmax
    7867              :  * of the containing tuple, so the caller needs to iterate on us somehow.
    7868              :  *
    7869              :  * We return (in *remaining, if not NULL) the number of members that are still
    7870              :  * running, including any (non-aborted) subtransactions of our own transaction.
    7871              :  */
    7872              : static void
    7873           55 : MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
    7874              :                 Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
    7875              :                 int *remaining)
    7876              : {
    7877           55 :     (void) Do_MultiXactIdWait(multi, status, infomask, false,
    7878              :                               rel, ctid, oper, remaining, false);
    7879           55 : }
    7880              : 
    7881              : /*
    7882              :  * ConditionalMultiXactIdWait
    7883              :  *      As above, but only lock if we can get the lock without blocking.
    7884              :  *
    7885              :  * By the time we finish sleeping, someone else may have changed the Xmax
    7886              :  * of the containing tuple, so the caller needs to iterate on us somehow.
    7887              :  *
    7888              :  * If the multixact is now all gone, return true.  Returns false if some
    7889              :  * transactions might still be running.
    7890              :  *
    7891              :  * We return (in *remaining, if not NULL) the number of members that are still
    7892              :  * running, including any (non-aborted) subtransactions of our own transaction.
    7893              :  */
    7894              : static bool
    7895            6 : ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
    7896              :                            uint16 infomask, Relation rel, int *remaining,
    7897              :                            bool logLockFailure)
    7898              : {
    7899            6 :     return Do_MultiXactIdWait(multi, status, infomask, true,
    7900              :                               rel, NULL, XLTW_None, remaining, logLockFailure);
    7901              : }
    7902              : 
    7903              : /*
    7904              :  * heap_tuple_needs_eventual_freeze
    7905              :  *
    7906              :  * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
    7907              :  * will eventually require freezing (if tuple isn't removed by pruning first).
    7908              :  */
    7909              : bool
    7910       118489 : heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
    7911              : {
    7912              :     TransactionId xid;
    7913              : 
    7914              :     /*
    7915              :      * If xmin is a normal transaction ID, this tuple is definitely not
    7916              :      * frozen.
    7917              :      */
    7918       118489 :     xid = HeapTupleHeaderGetXmin(tuple);
    7919       118489 :     if (TransactionIdIsNormal(xid))
    7920         3300 :         return true;
    7921              : 
    7922              :     /*
    7923              :      * If xmax is a valid xact or multixact, this tuple is also not frozen.
    7924              :      */
    7925       115189 :     if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
    7926              :     {
    7927              :         MultiXactId multi;
    7928              : 
    7929            0 :         multi = HeapTupleHeaderGetRawXmax(tuple);
    7930            0 :         if (MultiXactIdIsValid(multi))
    7931            0 :             return true;
    7932              :     }
    7933              :     else
    7934              :     {
    7935       115189 :         xid = HeapTupleHeaderGetRawXmax(tuple);
    7936       115189 :         if (TransactionIdIsNormal(xid))
    7937            8 :             return true;
    7938              :     }
    7939              : 
    7940       115181 :     if (tuple->t_infomask & HEAP_MOVED)
    7941              :     {
    7942            0 :         xid = HeapTupleHeaderGetXvac(tuple);
    7943            0 :         if (TransactionIdIsNormal(xid))
    7944            0 :             return true;
    7945              :     }
    7946              : 
    7947       115181 :     return false;
    7948              : }
    7949              : 
    7950              : /*
    7951              :  * heap_tuple_should_freeze
    7952              :  *
    7953              :  * Return value indicates if heap_prepare_freeze_tuple sibling function would
    7954              :  * (or should) force freezing of the heap page that contains caller's tuple.
    7955              :  * Tuple header XIDs/MXIDs < FreezeLimit/MultiXactCutoff trigger freezing.
    7956              :  * This includes (xmin, xmax, xvac) fields, as well as MultiXact member XIDs.
    7957              :  *
    7958              :  * The *NoFreezePageRelfrozenXid and *NoFreezePageRelminMxid input/output
    7959              :  * arguments help VACUUM track the oldest extant XID/MXID remaining in rel.
    7960              :  * Our working assumption is that caller won't decide to freeze this tuple.
    7961              :  * It's up to caller to only ratchet back its own top-level trackers after the
    7962              :  * point that it fully commits to not freezing the tuple/page in question.
    7963              :  */
bool
heap_tuple_should_freeze(HeapTupleHeader tuple,
                         const struct VacuumCutoffs *cutoffs,
                         TransactionId *NoFreezePageRelfrozenXid,
                         MultiXactId *NoFreezePageRelminMxid)
{
    TransactionId xid;
    MultiXactId multi;
    bool        freeze = false;

    /* First deal with xmin */
    xid = HeapTupleHeaderGetXmin(tuple);
    if (TransactionIdIsNormal(xid))
    {
        Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
        /* Ratchet back caller's no-freeze tracker, then test cutoff */
        if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
            *NoFreezePageRelfrozenXid = xid;
        if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
            freeze = true;
    }

    /* Now deal with xmax */
    xid = InvalidTransactionId;
    multi = InvalidMultiXactId;
    if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
        multi = HeapTupleHeaderGetRawXmax(tuple);
    else
        xid = HeapTupleHeaderGetRawXmax(tuple);

    if (TransactionIdIsNormal(xid))
    {
        Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
        /* xmax is a non-permanent XID */
        if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
            *NoFreezePageRelfrozenXid = xid;
        if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
            freeze = true;
    }
    else if (!MultiXactIdIsValid(multi))
    {
        /* xmax is a permanent XID or invalid MultiXactId/XID */
    }
    else if (HEAP_LOCKED_UPGRADED(tuple->t_infomask))
    {
        /* xmax is a pg_upgrade'd MultiXact, which can't have updater XID */
        if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
            *NoFreezePageRelminMxid = multi;
        /* heap_prepare_freeze_tuple always freezes pg_upgrade'd xmax */
        freeze = true;
    }
    else
    {
        /* xmax is a MultiXactId that may have an updater XID */
        MultiXactMember *members;
        int         nmembers;

        Assert(MultiXactIdPrecedesOrEquals(cutoffs->relminmxid, multi));
        if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
            *NoFreezePageRelminMxid = multi;
        if (MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff))
            freeze = true;

        /* need to check whether any member of the mxact is old */
        nmembers = GetMultiXactIdMembers(multi, &members, false,
                                         HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));

        /* Each member XID independently ratchets trackers / forces freeze */
        for (int i = 0; i < nmembers; i++)
        {
            xid = members[i].xid;
            Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
            if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
                *NoFreezePageRelfrozenXid = xid;
            if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
                freeze = true;
        }
        if (nmembers > 0)
            pfree(members);
    }

    /* Finally, old-style VACUUM FULL xvac, if any */
    if (tuple->t_infomask & HEAP_MOVED)
    {
        xid = HeapTupleHeaderGetXvac(tuple);
        if (TransactionIdIsNormal(xid))
        {
            Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
            if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
                *NoFreezePageRelfrozenXid = xid;
            /* heap_prepare_freeze_tuple forces xvac freezing */
            freeze = true;
        }
    }

    return freeze;
}
    8058              : 
    8059              : /*
    8060              :  * Maintain snapshotConflictHorizon for caller by ratcheting forward its value
    8061              :  * using any committed XIDs contained in 'tuple', an obsolescent heap tuple
    8062              :  * that caller is in the process of physically removing, e.g. via HOT pruning
    8063              :  * or index deletion.
    8064              :  *
    8065              :  * Caller must initialize its value to InvalidTransactionId, which is
    8066              :  * generally interpreted as "definitely no need for a recovery conflict".
    8067              :  * Final value must reflect all heap tuples that caller will physically remove
    8068              :  * (or remove TID references to) via its ongoing pruning/deletion operation.
    8069              :  * ResolveRecoveryConflictWithSnapshot() is passed the final value (taken from
    8070              :  * caller's WAL record) by REDO routine when it replays caller's operation.
    8071              :  */
    8072              : void
    8073      1895097 : HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple,
    8074              :                                       TransactionId *snapshotConflictHorizon)
    8075              : {
    8076      1895097 :     TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
    8077      1895097 :     TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
    8078      1895097 :     TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
    8079              : 
    8080      1895097 :     if (tuple->t_infomask & HEAP_MOVED)
    8081              :     {
    8082            0 :         if (TransactionIdPrecedes(*snapshotConflictHorizon, xvac))
    8083            0 :             *snapshotConflictHorizon = xvac;
    8084              :     }
    8085              : 
    8086              :     /*
    8087              :      * Ignore tuples inserted by an aborted transaction or if the tuple was
    8088              :      * updated/deleted by the inserting transaction.
    8089              :      *
    8090              :      * Look for a committed hint bit, or if no xmin bit is set, check clog.
    8091              :      */
    8092      1895097 :     if (HeapTupleHeaderXminCommitted(tuple) ||
    8093       122212 :         (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
    8094              :     {
    8095      3378337 :         if (xmax != xmin &&
    8096      1591268 :             TransactionIdFollows(xmax, *snapshotConflictHorizon))
    8097       117433 :             *snapshotConflictHorizon = xmax;
    8098              :     }
    8099      1895097 : }
    8100              : 
    8101              : #ifdef USE_PREFETCH
    8102              : /*
    8103              :  * Helper function for heap_index_delete_tuples.  Issues prefetch requests for
    8104              :  * prefetch_count buffers.  The prefetch_state keeps track of all the buffers
    8105              :  * we can prefetch, and which have already been prefetched; each call to this
    8106              :  * function picks up where the previous call left off.
    8107              :  *
    8108              :  * Note: we expect the deltids array to be sorted in an order that groups TIDs
    8109              :  * by heap block, with all TIDs for each block appearing together in exactly
    8110              :  * one group.
    8111              :  */
    8112              : static void
    8113        25328 : index_delete_prefetch_buffer(Relation rel,
    8114              :                              IndexDeletePrefetchState *prefetch_state,
    8115              :                              int prefetch_count)
    8116              : {
    8117        25328 :     BlockNumber cur_hblkno = prefetch_state->cur_hblkno;
    8118        25328 :     int         count = 0;
    8119              :     int         i;
    8120        25328 :     int         ndeltids = prefetch_state->ndeltids;
    8121        25328 :     TM_IndexDelete *deltids = prefetch_state->deltids;
    8122              : 
    8123        25328 :     for (i = prefetch_state->next_item;
    8124       822767 :          i < ndeltids && count < prefetch_count;
    8125       797439 :          i++)
    8126              :     {
    8127       797439 :         ItemPointer htid = &deltids[i].tid;
    8128              : 
    8129      1587319 :         if (cur_hblkno == InvalidBlockNumber ||
    8130       789880 :             ItemPointerGetBlockNumber(htid) != cur_hblkno)
    8131              :         {
    8132        22320 :             cur_hblkno = ItemPointerGetBlockNumber(htid);
    8133        22320 :             PrefetchBuffer(rel, MAIN_FORKNUM, cur_hblkno);
    8134        22320 :             count++;
    8135              :         }
    8136              :     }
    8137              : 
    8138              :     /*
    8139              :      * Save the prefetch position so that next time we can continue from that
    8140              :      * position.
    8141              :      */
    8142        25328 :     prefetch_state->next_item = i;
    8143        25328 :     prefetch_state->cur_hblkno = cur_hblkno;
    8144        25328 : }
    8145              : #endif
    8146              : 
    8147              : /*
    8148              :  * Helper function for heap_index_delete_tuples.  Checks for index corruption
    8149              :  * involving an invalid TID in index AM caller's index page.
    8150              :  *
    8151              :  * This is an ideal place for these checks.  The index AM must hold a buffer
    8152              :  * lock on the index page containing the TIDs we examine here, so we don't
    8153              :  * have to worry about concurrent VACUUMs at all.  We can be sure that the
    8154              :  * index is corrupt when htid points directly to an LP_UNUSED item or
    8155              :  * heap-only tuple, which is not the case during standard index scans.
    8156              :  */
    8157              : static inline void
    8158       678057 : index_delete_check_htid(TM_IndexDeleteOp *delstate,
    8159              :                         Page page, OffsetNumber maxoff,
    8160              :                         const ItemPointerData *htid, TM_IndexStatus *istatus)
    8161              : {
    8162       678057 :     OffsetNumber indexpagehoffnum = ItemPointerGetOffsetNumber(htid);
    8163              :     ItemId      iid;
    8164              : 
    8165              :     Assert(OffsetNumberIsValid(istatus->idxoffnum));
    8166              : 
    8167       678057 :     if (unlikely(indexpagehoffnum > maxoff))
    8168            0 :         ereport(ERROR,
    8169              :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    8170              :                  errmsg_internal("heap tid from index tuple (%u,%u) points past end of heap page line pointer array at offset %u of block %u in index \"%s\"",
    8171              :                                  ItemPointerGetBlockNumber(htid),
    8172              :                                  indexpagehoffnum,
    8173              :                                  istatus->idxoffnum, delstate->iblknum,
    8174              :                                  RelationGetRelationName(delstate->irel))));
    8175              : 
    8176       678057 :     iid = PageGetItemId(page, indexpagehoffnum);
    8177       678057 :     if (unlikely(!ItemIdIsUsed(iid)))
    8178            0 :         ereport(ERROR,
    8179              :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    8180              :                  errmsg_internal("heap tid from index tuple (%u,%u) points to unused heap page item at offset %u of block %u in index \"%s\"",
    8181              :                                  ItemPointerGetBlockNumber(htid),
    8182              :                                  indexpagehoffnum,
    8183              :                                  istatus->idxoffnum, delstate->iblknum,
    8184              :                                  RelationGetRelationName(delstate->irel))));
    8185              : 
    8186       678057 :     if (ItemIdHasStorage(iid))
    8187              :     {
    8188              :         HeapTupleHeader htup;
    8189              : 
    8190              :         Assert(ItemIdIsNormal(iid));
    8191       398205 :         htup = (HeapTupleHeader) PageGetItem(page, iid);
    8192              : 
    8193       398205 :         if (unlikely(HeapTupleHeaderIsHeapOnly(htup)))
    8194            0 :             ereport(ERROR,
    8195              :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    8196              :                      errmsg_internal("heap tid from index tuple (%u,%u) points to heap-only tuple at offset %u of block %u in index \"%s\"",
    8197              :                                      ItemPointerGetBlockNumber(htid),
    8198              :                                      indexpagehoffnum,
    8199              :                                      istatus->idxoffnum, delstate->iblknum,
    8200              :                                      RelationGetRelationName(delstate->irel))));
    8201              :     }
    8202       678057 : }
    8203              : 
    8204              : /*
    8205              :  * heapam implementation of tableam's index_delete_tuples interface.
    8206              :  *
    8207              :  * This helper function is called by index AMs during index tuple deletion.
    8208              :  * See tableam header comments for an explanation of the interface implemented
    8209              :  * here and a general theory of operation.  Note that each call here is either
    8210              :  * a simple index deletion call, or a bottom-up index deletion call.
    8211              :  *
    8212              :  * It's possible for this to generate a fair amount of I/O, since we may be
    8213              :  * deleting hundreds of tuples from a single index block.  To amortize that
    8214              :  * cost to some degree, this uses prefetching and combines repeat accesses to
    8215              :  * the same heap block.
    8216              :  */
    8217              : TransactionId
    8218         7559 : heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
    8219              : {
    8220              :     /* Initial assumption is that earlier pruning took care of conflict */
    8221         7559 :     TransactionId snapshotConflictHorizon = InvalidTransactionId;
    8222         7559 :     BlockNumber blkno = InvalidBlockNumber;
    8223         7559 :     Buffer      buf = InvalidBuffer;
    8224         7559 :     Page        page = NULL;
    8225         7559 :     OffsetNumber maxoff = InvalidOffsetNumber;
    8226              :     TransactionId priorXmax;
    8227              : #ifdef USE_PREFETCH
    8228              :     IndexDeletePrefetchState prefetch_state;
    8229              :     int         prefetch_distance;
    8230              : #endif
    8231              :     SnapshotData SnapshotNonVacuumable;
    8232         7559 :     int         finalndeltids = 0,
    8233         7559 :                 nblocksaccessed = 0;
    8234              : 
    8235              :     /* State that's only used in bottom-up index deletion case */
    8236         7559 :     int         nblocksfavorable = 0;
    8237         7559 :     int         curtargetfreespace = delstate->bottomupfreespace,
    8238         7559 :                 lastfreespace = 0,
    8239         7559 :                 actualfreespace = 0;
    8240         7559 :     bool        bottomup_final_block = false;
    8241              : 
    8242         7559 :     InitNonVacuumableSnapshot(SnapshotNonVacuumable, GlobalVisTestFor(rel));
    8243              : 
    8244              :     /* Sort caller's deltids array by TID for further processing */
    8245         7559 :     index_delete_sort(delstate);
    8246              : 
    8247              :     /*
    8248              :      * Bottom-up case: resort deltids array in an order attuned to where the
    8249              :      * greatest number of promising TIDs are to be found, and determine how
    8250              :      * many blocks from the start of sorted array should be considered
    8251              :      * favorable.  This will also shrink the deltids array in order to
    8252              :      * eliminate completely unfavorable blocks up front.
    8253              :      */
    8254         7559 :     if (delstate->bottomup)
    8255         2283 :         nblocksfavorable = bottomup_sort_and_shrink(delstate);
    8256              : 
    8257              : #ifdef USE_PREFETCH
    8258              :     /* Initialize prefetch state. */
    8259         7559 :     prefetch_state.cur_hblkno = InvalidBlockNumber;
    8260         7559 :     prefetch_state.next_item = 0;
    8261         7559 :     prefetch_state.ndeltids = delstate->ndeltids;
    8262         7559 :     prefetch_state.deltids = delstate->deltids;
    8263              : 
    8264              :     /*
    8265              :      * Determine the prefetch distance that we will attempt to maintain.
    8266              :      *
    8267              :      * Since the caller holds a buffer lock somewhere in rel, we'd better make
    8268              :      * sure that isn't a catalog relation before we call code that does
    8269              :      * syscache lookups, to avoid risk of deadlock.
    8270              :      */
    8271         7559 :     if (IsCatalogRelation(rel))
    8272         5804 :         prefetch_distance = maintenance_io_concurrency;
    8273              :     else
    8274              :         prefetch_distance =
    8275         1755 :             get_tablespace_maintenance_io_concurrency(rel->rd_rel->reltablespace);
    8276              : 
    8277              :     /* Cap initial prefetch distance for bottom-up deletion caller */
    8278         7559 :     if (delstate->bottomup)
    8279              :     {
    8280              :         Assert(nblocksfavorable >= 1);
    8281              :         Assert(nblocksfavorable <= BOTTOMUP_MAX_NBLOCKS);
    8282         2283 :         prefetch_distance = Min(prefetch_distance, nblocksfavorable);
    8283              :     }
    8284              : 
    8285              :     /* Start prefetching. */
    8286         7559 :     index_delete_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
    8287              : #endif
    8288              : 
    8289              :     /* Iterate over deltids, determine which to delete, check their horizon */
    8290              :     Assert(delstate->ndeltids > 0);
    8291       685616 :     for (int i = 0; i < delstate->ndeltids; i++)
    8292              :     {
    8293       680340 :         TM_IndexDelete *ideltid = &delstate->deltids[i];
    8294       680340 :         TM_IndexStatus *istatus = delstate->status + ideltid->id;
    8295       680340 :         ItemPointer htid = &ideltid->tid;
    8296              :         OffsetNumber offnum;
    8297              : 
    8298              :         /*
    8299              :          * Read buffer, and perform required extra steps each time a new block
    8300              :          * is encountered.  Avoid refetching if it's the same block as the one
    8301              :          * from the last htid.
    8302              :          */
    8303      1353121 :         if (blkno == InvalidBlockNumber ||
    8304       672781 :             ItemPointerGetBlockNumber(htid) != blkno)
    8305              :         {
    8306              :             /*
    8307              :              * Consider giving up early for bottom-up index deletion caller
    8308              :              * first. (Only prefetch next-next block afterwards, when it
    8309              :              * becomes clear that we're at least going to access the next
    8310              :              * block in line.)
    8311              :              *
    8312              :              * Sometimes the first block frees so much space for bottom-up
    8313              :              * caller that the deletion process can end without accessing any
    8314              :              * more blocks.  It is usually necessary to access 2 or 3 blocks
    8315              :              * per bottom-up deletion operation, though.
    8316              :              */
    8317        20052 :             if (delstate->bottomup)
    8318              :             {
    8319              :                 /*
    8320              :                  * We often allow caller to delete a few additional items
    8321              :                  * whose entries we reached after the point that space target
    8322              :                  * from caller was satisfied.  The cost of accessing the page
    8323              :                  * was already paid at that point, so it made sense to finish
    8324              :                  * it off.  When that happened, we finalize everything here
    8325              :                  * (by finishing off the whole bottom-up deletion operation
    8326              :                  * without needlessly paying the cost of accessing any more
    8327              :                  * blocks).
    8328              :                  */
    8329         4943 :                 if (bottomup_final_block)
    8330          187 :                     break;
    8331              : 
    8332              :                 /*
    8333              :                  * Give up when we didn't enable our caller to free any
    8334              :                  * additional space as a result of processing the page that we
    8335              :                  * just finished up with.  This rule is the main way in which
    8336              :                  * we keep the cost of bottom-up deletion under control.
    8337              :                  */
    8338         4756 :                 if (nblocksaccessed >= 1 && actualfreespace == lastfreespace)
    8339         2096 :                     break;
    8340         2660 :                 lastfreespace = actualfreespace;    /* for next time */
    8341              : 
    8342              :                 /*
    8343              :                  * Deletion operation (which is bottom-up) will definitely
    8344              :                  * access the next block in line.  Prepare for that now.
    8345              :                  *
    8346              :                  * Decay target free space so that we don't hang on for too
    8347              :                  * long with a marginal case. (Space target is only truly
    8348              :                  * helpful when it allows us to recognize that we don't need
    8349              :                  * to access more than 1 or 2 blocks to satisfy caller due to
    8350              :                  * agreeable workload characteristics.)
    8351              :                  *
    8352              :                  * We are a bit more patient when we encounter contiguous
    8353              :                  * blocks, though: these are treated as favorable blocks.  The
    8354              :                  * decay process is only applied when the next block in line
    8355              :                  * is not a favorable/contiguous block.  This is not an
    8356              :                  * exception to the general rule; we still insist on finding
    8357              :                  * at least one deletable item per block accessed.  See
    8358              :                  * bottomup_nblocksfavorable() for full details of the theory
    8359              :                  * behind favorable blocks and heap block locality in general.
    8360              :                  *
    8361              :                  * Note: The first block in line is always treated as a
    8362              :                  * favorable block, so the earliest possible point that the
    8363              :                  * decay can be applied is just before we access the second
    8364              :                  * block in line.  The Assert() verifies this for us.
    8365              :                  */
    8366              :                 Assert(nblocksaccessed > 0 || nblocksfavorable > 0);
    8367         2660 :                 if (nblocksfavorable > 0)
    8368         2473 :                     nblocksfavorable--;
    8369              :                 else
    8370          187 :                     curtargetfreespace /= 2;
    8371              :             }
    8372              : 
    8373              :             /* release old buffer */
    8374        17769 :             if (BufferIsValid(buf))
    8375        10210 :                 UnlockReleaseBuffer(buf);
    8376              : 
    8377        17769 :             blkno = ItemPointerGetBlockNumber(htid);
    8378        17769 :             buf = ReadBuffer(rel, blkno);
    8379        17769 :             nblocksaccessed++;
    8380              :             Assert(!delstate->bottomup ||
    8381              :                    nblocksaccessed <= BOTTOMUP_MAX_NBLOCKS);
    8382              : 
    8383              : #ifdef USE_PREFETCH
    8384              : 
    8385              :             /*
    8386              :              * To maintain the prefetch distance, prefetch one more page for
    8387              :              * each page we read.
    8388              :              */
    8389        17769 :             index_delete_prefetch_buffer(rel, &prefetch_state, 1);
    8390              : #endif
    8391              : 
    8392        17769 :             LockBuffer(buf, BUFFER_LOCK_SHARE);
    8393              : 
    8394        17769 :             page = BufferGetPage(buf);
    8395        17769 :             maxoff = PageGetMaxOffsetNumber(page);
    8396              :         }
    8397              : 
    8398              :         /*
    8399              :          * In passing, detect index corruption involving an index page with a
    8400              :          * TID that points to a location in the heap that couldn't possibly be
    8401              :          * correct.  We only do this with actual TIDs from caller's index page
    8402              :          * (not items reached by traversing through a HOT chain).
    8403              :          */
    8404       678057 :         index_delete_check_htid(delstate, page, maxoff, htid, istatus);
    8405              : 
    8406       678057 :         if (istatus->knowndeletable)
    8407              :             Assert(!delstate->bottomup && !istatus->promising);
    8408              :         else
    8409              :         {
    8410       515553 :             ItemPointerData tmp = *htid;
    8411              :             HeapTupleData heapTuple;
    8412              : 
    8413              :             /* Are any tuples from this HOT chain non-vacuumable? */
    8414       515553 :             if (heap_hot_search_buffer(&tmp, rel, buf, &SnapshotNonVacuumable,
    8415              :                                        &heapTuple, NULL, true))
    8416       287306 :                 continue;       /* can't delete entry */
    8417              : 
    8418              :             /* Caller will delete, since whole HOT chain is vacuumable */
    8419       228247 :             istatus->knowndeletable = true;
    8420              : 
    8421              :             /* Maintain index free space info for bottom-up deletion case */
    8422       228247 :             if (delstate->bottomup)
    8423              :             {
    8424              :                 Assert(istatus->freespace > 0);
    8425        10231 :                 actualfreespace += istatus->freespace;
    8426        10231 :                 if (actualfreespace >= curtargetfreespace)
    8427         2951 :                     bottomup_final_block = true;
    8428              :             }
    8429              :         }
    8430              : 
    8431              :         /*
    8432              :          * Maintain snapshotConflictHorizon value for deletion operation as a
    8433              :          * whole by advancing current value using heap tuple headers.  This is
    8434              :          * loosely based on the logic for pruning a HOT chain.
    8435              :          */
    8436       390751 :         offnum = ItemPointerGetOffsetNumber(htid);
    8437       390751 :         priorXmax = InvalidTransactionId;   /* cannot check first XMIN */
    8438              :         for (;;)
    8439        22270 :         {
    8440              :             ItemId      lp;
    8441              :             HeapTupleHeader htup;
    8442              : 
    8443              :             /* Sanity check (pure paranoia) */
    8444       413021 :             if (offnum < FirstOffsetNumber)
    8445            0 :                 break;
    8446              : 
    8447              :             /*
    8448              :              * An offset past the end of page's line pointer array is possible
    8449              :              * when the array was truncated
    8450              :              */
    8451       413021 :             if (offnum > maxoff)
    8452            0 :                 break;
    8453              : 
    8454       413021 :             lp = PageGetItemId(page, offnum);
    8455       413021 :             if (ItemIdIsRedirected(lp))
    8456              :             {
    8457        10003 :                 offnum = ItemIdGetRedirect(lp);
    8458        10003 :                 continue;
    8459              :             }
    8460              : 
    8461              :             /*
    8462              :              * We'll often encounter LP_DEAD line pointers (especially with an
    8463              :              * entry marked knowndeletable by our caller up front).  No heap
    8464              :              * tuple headers get examined for an htid that leads us to an
    8465              :              * LP_DEAD item.  This is okay because the earlier pruning
    8466              :              * operation that made the line pointer LP_DEAD in the first place
    8467              :              * must have considered the original tuple header as part of
    8468              :              * generating its own snapshotConflictHorizon value.
    8469              :              *
    8470              :              * Relying on XLOG_HEAP2_PRUNE_VACUUM_SCAN records like this is
    8471              :              * the same strategy that index vacuuming uses in all cases. Index
    8472              :              * VACUUM WAL records don't even have a snapshotConflictHorizon
    8473              :              * field of their own for this reason.
    8474              :              */
    8475       403018 :             if (!ItemIdIsNormal(lp))
    8476       252649 :                 break;
    8477              : 
    8478       150369 :             htup = (HeapTupleHeader) PageGetItem(page, lp);
    8479              : 
    8480              :             /*
    8481              :              * Check the tuple XMIN against prior XMAX, if any
    8482              :              */
    8483       162636 :             if (TransactionIdIsValid(priorXmax) &&
    8484        12267 :                 !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
    8485            0 :                 break;
    8486              : 
    8487       150369 :             HeapTupleHeaderAdvanceConflictHorizon(htup,
    8488              :                                                   &snapshotConflictHorizon);
    8489              : 
    8490              :             /*
    8491              :              * If the tuple is not HOT-updated, then we are at the end of this
    8492              :              * HOT-chain.  No need to visit later tuples from the same update
    8493              :              * chain (they get their own index entries) -- just move on to
    8494              :              * next htid from index AM caller.
    8495              :              */
    8496       150369 :             if (!HeapTupleHeaderIsHotUpdated(htup))
    8497       138102 :                 break;
    8498              : 
    8499              :             /* Advance to next HOT chain member */
    8500              :             Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blkno);
    8501        12267 :             offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
    8502        12267 :             priorXmax = HeapTupleHeaderGetUpdateXid(htup);
    8503              :         }
    8504              : 
    8505              :         /* Enable further/final shrinking of deltids for caller */
    8506       390751 :         finalndeltids = i + 1;
    8507              :     }
    8508              : 
    8509         7559 :     UnlockReleaseBuffer(buf);
    8510              : 
    8511              :     /*
    8512              :      * Shrink deltids array to exclude non-deletable entries at the end.  This
    8513              :      * is not just a minor optimization.  Final deltids array size might be
    8514              :      * zero for a bottom-up caller.  Index AM is explicitly allowed to rely on
    8515              :      * ndeltids being zero in all cases with zero total deletable entries.
    8516              :      */
    8517              :     Assert(finalndeltids > 0 || delstate->bottomup);
    8518         7559 :     delstate->ndeltids = finalndeltids;
    8519              : 
    8520         7559 :     return snapshotConflictHorizon;
    8521              : }
    8522              : 
    8523              : /*
    8524              :  * Specialized inlineable comparison function for index_delete_sort()
    8525              :  */
    8526              : static inline int
    8527     15554235 : index_delete_sort_cmp(TM_IndexDelete *deltid1, TM_IndexDelete *deltid2)
    8528              : {
    8529     15554235 :     ItemPointer tid1 = &deltid1->tid;
    8530     15554235 :     ItemPointer tid2 = &deltid2->tid;
    8531              : 
    8532              :     {
    8533     15554235 :         BlockNumber blk1 = ItemPointerGetBlockNumber(tid1);
    8534     15554235 :         BlockNumber blk2 = ItemPointerGetBlockNumber(tid2);
    8535              : 
    8536     15554235 :         if (blk1 != blk2)
    8537      6489243 :             return (blk1 < blk2) ? -1 : 1;
    8538              :     }
    8539              :     {
    8540      9064992 :         OffsetNumber pos1 = ItemPointerGetOffsetNumber(tid1);
    8541      9064992 :         OffsetNumber pos2 = ItemPointerGetOffsetNumber(tid2);
    8542              : 
    8543      9064992 :         if (pos1 != pos2)
    8544      9064992 :             return (pos1 < pos2) ? -1 : 1;
    8545              :     }
    8546              : 
    8547              :     Assert(false);
    8548              : 
    8549            0 :     return 0;
    8550              : }
    8551              : 
    8552              : /*
    8553              :  * Sort deltids array from delstate by TID.  This prepares it for further
    8554              :  * processing by heap_index_delete_tuples().
    8555              :  *
    8556              :  * This operation becomes a noticeable consumer of CPU cycles with some
    8557              :  * workloads, so we go to the trouble of specialization/micro optimization.
    8558              :  * We use shellsort for this because it's easy to specialize, compiles to
    8559              :  * relatively few instructions, and is adaptive to presorted inputs/subsets
    8560              :  * (which are typical here).
    8561              :  */
    8562              : static void
    8563         7559 : index_delete_sort(TM_IndexDeleteOp *delstate)
    8564              : {
    8565         7559 :     TM_IndexDelete *deltids = delstate->deltids;
    8566         7559 :     int         ndeltids = delstate->ndeltids;
    8567              : 
    8568              :     /*
    8569              :      * Shellsort gap sequence (taken from Sedgewick-Incerpi paper).
    8570              :      *
    8571              :      * This implementation is fast with array sizes up to ~4500.  This covers
    8572              :      * all supported BLCKSZ values.
    8573              :      */
    8574         7559 :     const int   gaps[9] = {1968, 861, 336, 112, 48, 21, 7, 3, 1};
    8575              : 
    8576              :     /* Think carefully before changing anything here -- keep swaps cheap */
    8577              :     StaticAssertDecl(sizeof(TM_IndexDelete) <= 8,
    8578              :                      "element size exceeds 8 bytes");
    8579              : 
    8580        75590 :     for (int g = 0; g < lengthof(gaps); g++)
    8581              :     {
    8582      9370333 :         for (int hi = gaps[g], i = hi; i < ndeltids; i++)
    8583              :         {
    8584      9302302 :             TM_IndexDelete d = deltids[i];
    8585      9302302 :             int         j = i;
    8586              : 
    8587     16021147 :             while (j >= hi && index_delete_sort_cmp(&deltids[j - hi], &d) >= 0)
    8588              :             {
    8589      6718845 :                 deltids[j] = deltids[j - hi];
    8590      6718845 :                 j -= hi;
    8591              :             }
    8592      9302302 :             deltids[j] = d;
    8593              :         }
    8594              :     }
    8595         7559 : }
    8596              : 
    8597              : /*
    8598              :  * Returns how many blocks should be considered favorable/contiguous for a
    8599              :  * bottom-up index deletion pass.  This is a number of heap blocks that starts
    8600              :  * from and includes the first block in line.
    8601              :  *
    8602              :  * There is always at least one favorable block during bottom-up index
    8603              :  * deletion.  In the worst case (i.e. with totally random heap blocks) the
    8604              :  * first block in line (the only favorable block) can be thought of as a
    8605              :  * degenerate array of contiguous blocks that consists of a single block.
    8606              :  * heap_index_delete_tuples() will expect this.
    8607              :  *
    8608              :  * Caller passes blockgroups, a description of the final order that deltids
    8609              :  * will be sorted in for heap_index_delete_tuples() bottom-up index deletion
    8610              :  * processing.  Note that deltids need not actually be sorted just yet (caller
    8611              :  * only passes deltids to us so that we can interpret blockgroups).
    8612              :  *
    8613              :  * You might guess that the existence of contiguous blocks cannot matter much,
    8614              :  * since in general the main factor that determines which blocks we visit is
    8615              :  * the number of promising TIDs, which is a fixed hint from the index AM.
    8616              :  * We're not really targeting the general case, though -- the actual goal is
    8617              :  * to adapt our behavior to a wide variety of naturally occurring conditions.
    8618              :  * The effects of most of the heuristics we apply are only noticeable in the
    8619              :  * aggregate, over time and across many _related_ bottom-up index deletion
    8620              :  * passes.
    8621              :  *
    8622              :  * Deeming certain blocks favorable allows heapam to recognize and adapt to
    8623              :  * workloads where heap blocks visited during bottom-up index deletion can be
    8624              :  * accessed contiguously, in the sense that each newly visited block is the
    8625              :  * neighbor of the block that bottom-up deletion just finished processing (or
    8626              :  * close enough to it).  It will likely be cheaper to access more favorable
    8627              :  * blocks sooner rather than later (e.g. in this pass, not across a series of
    8628              :  * related bottom-up passes).  Either way it is probably only a matter of time
    8629              :  * (or a matter of further correlated version churn) before all blocks that
    8630              :  * appear together as a single large batch of favorable blocks get accessed by
    8631              :  * _some_ bottom-up pass.  Large batches of favorable blocks tend to either
    8632              :  * appear almost constantly or not even once (it all depends on per-index
    8633              :  * workload characteristics).
    8634              :  *
    8635              :  * Note that the blockgroups sort order applies a power-of-two bucketing
    8636              :  * scheme that creates opportunities for contiguous groups of blocks to get
    8637              :  * batched together, at least with workloads that are naturally amenable to
    8638              :  * being driven by heap block locality.  This doesn't just enhance the spatial
    8639              :  * locality of bottom-up heap block processing in the obvious way.  It also
    8640              :  * enables temporal locality of access, since sorting by heap block number
    8641              :  * naturally tends to make the bottom-up processing order deterministic.
    8642              :  *
    8643              :  * Consider the following example to get a sense of how temporal locality
    8644              :  * might matter: There is a heap relation with several indexes, each of which
    8645              :  * is low to medium cardinality.  It is subject to constant non-HOT updates.
    8646              :  * The updates are skewed (in one part of the primary key, perhaps).  None of
    8647              :  * the indexes are logically modified by the UPDATE statements (if they were
    8648              :  * then bottom-up index deletion would not be triggered in the first place).
    8649              :  * Naturally, each new round of index tuples (for each heap tuple that gets a
    8650              :  * heap_update() call) will have the same heap TID in each and every index.
    8651              :  * Since these indexes are low cardinality and never get logically modified,
    8652              :  * heapam processing during bottom-up deletion passes will access heap blocks
    8653              :  * in approximately sequential order.  Temporal locality of access occurs due
    8654              :  * to bottom-up deletion passes behaving very similarly across each of the
    8655              :  * indexes at any given moment.  This keeps the number of buffer misses needed
    8656              :  * to visit heap blocks to a minimum.
    8657              :  */
    8658              : static int
    8659         2283 : bottomup_nblocksfavorable(IndexDeleteCounts *blockgroups, int nblockgroups,
    8660              :                           TM_IndexDelete *deltids)
    8661              : {
    8662         2283 :     int64       lastblock = -1;
    8663         2283 :     int         nblocksfavorable = 0;
    8664              : 
    8665              :     Assert(nblockgroups >= 1);
    8666              :     Assert(nblockgroups <= BOTTOMUP_MAX_NBLOCKS);
    8667              : 
    8668              :     /*
    8669              :      * We tolerate heap blocks that will be accessed only slightly out of
    8670              :      * physical order.  Small blips occur when a pair of almost-contiguous
    8671              :      * blocks happen to fall into different buckets (perhaps due only to a
    8672              :      * small difference in npromisingtids that the bucketing scheme didn't
    8673              :      * quite manage to ignore).  We effectively ignore these blips by applying
    8674              :      * a small tolerance.  The precise tolerance we use is a little arbitrary,
    8675              :      * but it works well enough in practice.
    8676              :      */
    8677         7206 :     for (int b = 0; b < nblockgroups; b++)
    8678              :     {
    8679         6895 :         IndexDeleteCounts *group = blockgroups + b;
    8680         6895 :         TM_IndexDelete *firstdtid = deltids + group->ifirsttid;
    8681         6895 :         BlockNumber block = ItemPointerGetBlockNumber(&firstdtid->tid);
    8682              : 
    8683         6895 :         if (lastblock != -1 &&
    8684         4612 :             ((int64) block < lastblock - BOTTOMUP_TOLERANCE_NBLOCKS ||
    8685         3990 :              (int64) block > lastblock + BOTTOMUP_TOLERANCE_NBLOCKS))
    8686              :             break;
    8687              : 
    8688         4923 :         nblocksfavorable++;
    8689         4923 :         lastblock = block;
    8690              :     }
    8691              : 
    8692              :     /* Always indicate that there is at least 1 favorable block */
    8693              :     Assert(nblocksfavorable >= 1);
    8694              : 
    8695         2283 :     return nblocksfavorable;
    8696              : }
    8697              : 
    8698              : /*
    8699              :  * qsort comparison function for bottomup_sort_and_shrink()
    8700              :  */
    8701              : static int
    8702       258955 : bottomup_sort_and_shrink_cmp(const void *arg1, const void *arg2)
    8703              : {
    8704       258955 :     const IndexDeleteCounts *group1 = (const IndexDeleteCounts *) arg1;
    8705       258955 :     const IndexDeleteCounts *group2 = (const IndexDeleteCounts *) arg2;
    8706              : 
    8707              :     /*
    8708              :      * Most significant field is npromisingtids (which we invert the order of
    8709              :      * so as to sort in desc order).
    8710              :      *
    8711              :      * Caller should have already normalized npromisingtids fields into
    8712              :      * power-of-two values (buckets).
    8713              :      */
    8714       258955 :     if (group1->npromisingtids > group2->npromisingtids)
    8715        11657 :         return -1;
    8716       247298 :     if (group1->npromisingtids < group2->npromisingtids)
    8717        14725 :         return 1;
    8718              : 
    8719              :     /*
    8720              :      * Tiebreak: desc ntids sort order.
    8721              :      *
    8722              :      * We cannot expect power-of-two values for ntids fields.  We should
    8723              :      * behave as if they were already rounded up for us instead.
    8724              :      */
    8725       232573 :     if (group1->ntids != group2->ntids)
    8726              :     {
    8727       166068 :         uint32      ntids1 = pg_nextpower2_32((uint32) group1->ntids);
    8728       166068 :         uint32      ntids2 = pg_nextpower2_32((uint32) group2->ntids);
    8729              : 
    8730       166068 :         if (ntids1 > ntids2)
    8731        23804 :             return -1;
    8732       142264 :         if (ntids1 < ntids2)
    8733        31246 :             return 1;
    8734              :     }
    8735              : 
    8736              :     /*
    8737              :      * Tiebreak: asc offset-into-deltids-for-block (offset to first TID for
    8738              :      * block in deltids array) order.
    8739              :      *
    8740              :      * This is equivalent to sorting in ascending heap block number order
    8741              :      * (among otherwise equal subsets of the array).  This approach allows us
    8742              :      * to avoid accessing the out-of-line TID.  (We rely on the assumption
    8743              :      * that the deltids array was sorted in ascending heap TID order when
    8744              :      * these offsets to the first TID from each heap block group were formed.)
    8745              :      */
    8746       177523 :     if (group1->ifirsttid > group2->ifirsttid)
    8747        88208 :         return 1;
    8748        89315 :     if (group1->ifirsttid < group2->ifirsttid)
    8749        89315 :         return -1;
    8750              : 
    8751            0 :     pg_unreachable();
    8752              : 
    8753              :     return 0;
    8754              : }
    8755              : 
    8756              : /*
    8757              :  * heap_index_delete_tuples() helper function for bottom-up deletion callers.
    8758              :  *
    8759              :  * Sorts deltids array in the order needed for useful processing by bottom-up
    8760              :  * deletion.  The array should already be sorted in TID order when we're
    8761              :  * called.  The sort process groups heap TIDs from deltids into heap block
    8762              :  * groupings.  Earlier/more-promising groups/blocks are usually those that are
    8763              :  * known to have the most "promising" TIDs.
    8764              :  *
    8765              :  * Sets new size of deltids array (ndeltids) in state.  deltids will only have
    8766              :  * TIDs from the BOTTOMUP_MAX_NBLOCKS most promising heap blocks when we
    8767              :  * return.  This often means that deltids will be shrunk to a small fraction
    8768              :  * of its original size (we eliminate many heap blocks from consideration for
    8769              :  * caller up front).
    8770              :  *
    8771              :  * Returns the number of "favorable" blocks.  See bottomup_nblocksfavorable()
    8772              :  * for a definition and full details.
    8773              :  */
    8774              : static int
    8775         2283 : bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
    8776              : {
    8777              :     IndexDeleteCounts *blockgroups;
    8778              :     TM_IndexDelete *reordereddeltids;
    8779         2283 :     BlockNumber curblock = InvalidBlockNumber;
    8780         2283 :     int         nblockgroups = 0;
    8781         2283 :     int         ncopied = 0;
    8782         2283 :     int         nblocksfavorable = 0;
    8783              : 
    8784              :     Assert(delstate->bottomup);
    8785              :     Assert(delstate->ndeltids > 0);
    8786              : 
    8787              :     /* Calculate per-heap-block count of TIDs */
    8788         2283 :     blockgroups = palloc_array(IndexDeleteCounts, delstate->ndeltids);
    8789      1133689 :     for (int i = 0; i < delstate->ndeltids; i++)
    8790              :     {
    8791      1131406 :         TM_IndexDelete *ideltid = &delstate->deltids[i];
    8792      1131406 :         TM_IndexStatus *istatus = delstate->status + ideltid->id;
    8793      1131406 :         ItemPointer htid = &ideltid->tid;
    8794      1131406 :         bool        promising = istatus->promising;
    8795              : 
    8796      1131406 :         if (curblock != ItemPointerGetBlockNumber(htid))
    8797              :         {
    8798              :             /* New block group */
    8799        48740 :             nblockgroups++;
    8800              : 
    8801              :             Assert(curblock < ItemPointerGetBlockNumber(htid) ||
    8802              :                    !BlockNumberIsValid(curblock));
    8803              : 
    8804        48740 :             curblock = ItemPointerGetBlockNumber(htid);
    8805        48740 :             blockgroups[nblockgroups - 1].ifirsttid = i;
    8806        48740 :             blockgroups[nblockgroups - 1].ntids = 1;
    8807        48740 :             blockgroups[nblockgroups - 1].npromisingtids = 0;
    8808              :         }
    8809              :         else
    8810              :         {
    8811      1082666 :             blockgroups[nblockgroups - 1].ntids++;
    8812              :         }
    8813              : 
    8814      1131406 :         if (promising)
    8815       140871 :             blockgroups[nblockgroups - 1].npromisingtids++;
    8816              :     }
    8817              : 
    8818              :     /*
    8819              :      * We're about ready to sort block groups to determine the optimal order
    8820              :      * for visiting heap blocks.  But before we do, round the number of
    8821              :      * promising tuples for each block group up to the next power-of-two,
    8822              :      * unless it is very low (less than 4), in which case we round up to 4.
    8823              :      * npromisingtids is far too noisy to trust when choosing between a pair
    8824              :      * of block groups that both have very low values.
    8825              :      *
    8826              :      * This scheme divides heap blocks/block groups into buckets.  Each bucket
    8827              :      * contains blocks that have _approximately_ the same number of promising
    8828              :      * TIDs as each other.  The goal is to ignore relatively small differences
    8829              :      * in the total number of promising entries, so that the whole process can
    8830              :      * give a little weight to heapam factors (like heap block locality)
    8831              :      * instead.  This isn't a trade-off, really -- we have nothing to lose. It
    8832              :      * would be foolish to interpret small differences in npromisingtids
    8833              :      * values as anything more than noise.
    8834              :      *
    8835              :      * We tiebreak on nhtids when sorting block group subsets that have the
    8836              :      * same npromisingtids, but this has the same issues as npromisingtids,
    8837              :      * and so nhtids is subject to the same power-of-two bucketing scheme. The
    8838              :      * only reason that we don't fix nhtids in the same way here too is that
    8839              :      * we'll need accurate nhtids values after the sort.  We handle nhtids
    8840              :      * bucketization dynamically instead (in the sort comparator).
    8841              :      *
    8842              :      * See bottomup_nblocksfavorable() for a full explanation of when and how
    8843              :      * heap locality/favorable blocks can significantly influence when and how
    8844              :      * heap blocks are accessed.
    8845              :      */
    8846        51023 :     for (int b = 0; b < nblockgroups; b++)
    8847              :     {
    8848        48740 :         IndexDeleteCounts *group = blockgroups + b;
    8849              : 
    8850              :         /* Better off falling back on nhtids with low npromisingtids */
    8851        48740 :         if (group->npromisingtids <= 4)
    8852        41761 :             group->npromisingtids = 4;
    8853              :         else
    8854         6979 :             group->npromisingtids =
    8855         6979 :                 pg_nextpower2_32((uint32) group->npromisingtids);
    8856              :     }
    8857              : 
    8858              :     /* Sort groups and rearrange caller's deltids array */
    8859         2283 :     qsort(blockgroups, nblockgroups, sizeof(IndexDeleteCounts),
    8860              :           bottomup_sort_and_shrink_cmp);
    8861         2283 :     reordereddeltids = palloc(delstate->ndeltids * sizeof(TM_IndexDelete));
    8862              : 
    8863         2283 :     nblockgroups = Min(BOTTOMUP_MAX_NBLOCKS, nblockgroups);
    8864              :     /* Determine number of favorable blocks at the start of final deltids */
    8865         2283 :     nblocksfavorable = bottomup_nblocksfavorable(blockgroups, nblockgroups,
    8866              :                                                  delstate->deltids);
    8867              : 
    8868        15351 :     for (int b = 0; b < nblockgroups; b++)
    8869              :     {
    8870        13068 :         IndexDeleteCounts *group = blockgroups + b;
    8871        13068 :         TM_IndexDelete *firstdtid = delstate->deltids + group->ifirsttid;
    8872              : 
    8873        13068 :         memcpy(reordereddeltids + ncopied, firstdtid,
    8874        13068 :                sizeof(TM_IndexDelete) * group->ntids);
    8875        13068 :         ncopied += group->ntids;
    8876              :     }
    8877              : 
    8878              :     /* Copy final grouped and sorted TIDs back into start of caller's array */
    8879         2283 :     memcpy(delstate->deltids, reordereddeltids,
    8880              :            sizeof(TM_IndexDelete) * ncopied);
    8881         2283 :     delstate->ndeltids = ncopied;
    8882              : 
    8883         2283 :     pfree(reordereddeltids);
    8884         2283 :     pfree(blockgroups);
    8885              : 
    8886         2283 :     return nblocksfavorable;
    8887              : }
    8888              : 
    8889              : /*
    8890              :  * Perform XLogInsert for a heap-visible operation.  'block' is the block
    8891              :  * being marked all-visible, and vm_buffer is the buffer containing the
    8892              :  * corresponding visibility map block.  Both should have already been modified
    8893              :  * and dirtied.
    8894              :  *
    8895              :  * snapshotConflictHorizon comes from the largest xmin on the page being
    8896              :  * marked all-visible.  REDO routine uses it to generate recovery conflicts.
    8897              :  *
    8898              :  * If checksums or wal_log_hints are enabled, we may also generate a full-page
    8899              :  * image of heap_buffer. Otherwise, we optimize away the FPI (by specifying
    8900              :  * REGBUF_NO_IMAGE for the heap buffer), in which case the caller should *not*
    8901              :  * update the heap page's LSN.
    8902              :  */
    8903              : XLogRecPtr
    8904        41721 : log_heap_visible(Relation rel, Buffer heap_buffer, Buffer vm_buffer,
    8905              :                  TransactionId snapshotConflictHorizon, uint8 vmflags)
    8906              : {
    8907              :     xl_heap_visible xlrec;
    8908              :     XLogRecPtr  recptr;
    8909              :     uint8       flags;
    8910              : 
    8911              :     Assert(BufferIsValid(heap_buffer));
    8912              :     Assert(BufferIsValid(vm_buffer));
    8913              : 
    8914        41721 :     xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
    8915        41721 :     xlrec.flags = vmflags;
    8916        41721 :     if (RelationIsAccessibleInLogicalDecoding(rel))
    8917           54 :         xlrec.flags |= VISIBILITYMAP_XLOG_CATALOG_REL;
    8918        41721 :     XLogBeginInsert();
    8919        41721 :     XLogRegisterData(&xlrec, SizeOfHeapVisible);
    8920              : 
    8921        41721 :     XLogRegisterBuffer(0, vm_buffer, 0);
    8922              : 
    8923        41721 :     flags = REGBUF_STANDARD;
    8924        41721 :     if (!XLogHintBitIsNeeded())
    8925         3246 :         flags |= REGBUF_NO_IMAGE;
    8926        41721 :     XLogRegisterBuffer(1, heap_buffer, flags);
    8927              : 
    8928        41721 :     recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE);
    8929              : 
    8930        41721 :     return recptr;
    8931              : }
    8932              : 
    8933              : /*
    8934              :  * Perform XLogInsert for a heap-update operation.  Caller must already
    8935              :  * have modified the buffer(s) and marked them dirty.
    8936              :  */
    8937              : static XLogRecPtr
    8938       324625 : log_heap_update(Relation reln, Buffer oldbuf,
    8939              :                 Buffer newbuf, HeapTuple oldtup, HeapTuple newtup,
    8940              :                 HeapTuple old_key_tuple,
    8941              :                 bool all_visible_cleared, bool new_all_visible_cleared)
    8942              : {
    8943              :     xl_heap_update xlrec;
    8944              :     xl_heap_header xlhdr;
    8945              :     xl_heap_header xlhdr_idx;
    8946              :     uint8       info;
    8947              :     uint16      prefix_suffix[2];
    8948       324625 :     uint16      prefixlen = 0,
    8949       324625 :                 suffixlen = 0;
    8950              :     XLogRecPtr  recptr;
    8951       324625 :     Page        page = BufferGetPage(newbuf);
    8952       324625 :     bool        need_tuple_data = RelationIsLogicallyLogged(reln);
    8953              :     bool        init;
    8954              :     int         bufflags;
    8955              : 
    8956              :     /* Caller should not call me on a non-WAL-logged relation */
    8957              :     Assert(RelationNeedsWAL(reln));
    8958              : 
    8959       324625 :     XLogBeginInsert();
    8960              : 
    8961       324625 :     if (HeapTupleIsHeapOnly(newtup))
    8962       162275 :         info = XLOG_HEAP_HOT_UPDATE;
    8963              :     else
    8964       162350 :         info = XLOG_HEAP_UPDATE;
    8965              : 
    8966              :     /*
    8967              :      * If the old and new tuple are on the same page, we only need to log the
    8968              :      * parts of the new tuple that were changed.  That saves on the amount of
    8969              :      * WAL we need to write.  Currently, we just count any unchanged bytes in
    8970              :      * the beginning and end of the tuple.  That's quick to check, and
    8971              :      * perfectly covers the common case that only one field is updated.
    8972              :      *
    8973              :      * We could do this even if the old and new tuple are on different pages,
    8974              :      * but only if we don't make a full-page image of the old page, which is
    8975              :      * difficult to know in advance.  Also, if the old tuple is corrupt for
    8976              :      * some reason, it would allow the corruption to propagate the new page,
    8977              :      * so it seems best to avoid.  Under the general assumption that most
    8978              :      * updates tend to create the new tuple version on the same page, there
    8979              :      * isn't much to be gained by doing this across pages anyway.
    8980              :      *
    8981              :      * Skip this if we're taking a full-page image of the new page, as we
    8982              :      * don't include the new tuple in the WAL record in that case.  Also
    8983              :      * disable if effective_wal_level='logical', as logical decoding needs to
    8984              :      * be able to read the new tuple in whole from the WAL record alone.
    8985              :      */
    8986       324625 :     if (oldbuf == newbuf && !need_tuple_data &&
    8987       166168 :         !XLogCheckBufferNeedsBackup(newbuf))
    8988              :     {
    8989       165616 :         char       *oldp = (char *) oldtup->t_data + oldtup->t_data->t_hoff;
    8990       165616 :         char       *newp = (char *) newtup->t_data + newtup->t_data->t_hoff;
    8991       165616 :         int         oldlen = oldtup->t_len - oldtup->t_data->t_hoff;
    8992       165616 :         int         newlen = newtup->t_len - newtup->t_data->t_hoff;
    8993              : 
    8994              :         /* Check for common prefix between old and new tuple */
    8995     13888096 :         for (prefixlen = 0; prefixlen < Min(oldlen, newlen); prefixlen++)
    8996              :         {
    8997     13861563 :             if (newp[prefixlen] != oldp[prefixlen])
    8998       139083 :                 break;
    8999              :         }
    9000              : 
    9001              :         /*
    9002              :          * Storing the length of the prefix takes 2 bytes, so we need to save
    9003              :          * at least 3 bytes or there's no point.
    9004              :          */
    9005       165616 :         if (prefixlen < 3)
    9006        22615 :             prefixlen = 0;
    9007              : 
    9008              :         /* Same for suffix */
    9009      6100592 :         for (suffixlen = 0; suffixlen < Min(oldlen, newlen) - prefixlen; suffixlen++)
    9010              :         {
    9011      6073741 :             if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
    9012       138765 :                 break;
    9013              :         }
    9014       165616 :         if (suffixlen < 3)
    9015        39569 :             suffixlen = 0;
    9016              :     }
    9017              : 
    9018              :     /* Prepare main WAL data chain */
    9019       324625 :     xlrec.flags = 0;
    9020       324625 :     if (all_visible_cleared)
    9021         2114 :         xlrec.flags |= XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED;
    9022       324625 :     if (new_all_visible_cleared)
    9023          787 :         xlrec.flags |= XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED;
    9024       324625 :     if (prefixlen > 0)
    9025       143001 :         xlrec.flags |= XLH_UPDATE_PREFIX_FROM_OLD;
    9026       324625 :     if (suffixlen > 0)
    9027       126047 :         xlrec.flags |= XLH_UPDATE_SUFFIX_FROM_OLD;
    9028       324625 :     if (need_tuple_data)
    9029              :     {
    9030        47025 :         xlrec.flags |= XLH_UPDATE_CONTAINS_NEW_TUPLE;
    9031        47025 :         if (old_key_tuple)
    9032              :         {
    9033          147 :             if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
    9034           65 :                 xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_TUPLE;
    9035              :             else
    9036           82 :                 xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_KEY;
    9037              :         }
    9038              :     }
    9039              : 
    9040              :     /* If new tuple is the single and first tuple on page... */
    9041       328554 :     if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
    9042         3929 :         PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
    9043              :     {
    9044         3724 :         info |= XLOG_HEAP_INIT_PAGE;
    9045         3724 :         init = true;
    9046              :     }
    9047              :     else
    9048       320901 :         init = false;
    9049              : 
    9050              :     /* Prepare WAL data for the old page */
    9051       324625 :     xlrec.old_offnum = ItemPointerGetOffsetNumber(&oldtup->t_self);
    9052       324625 :     xlrec.old_xmax = HeapTupleHeaderGetRawXmax(oldtup->t_data);
    9053       649250 :     xlrec.old_infobits_set = compute_infobits(oldtup->t_data->t_infomask,
    9054       324625 :                                               oldtup->t_data->t_infomask2);
    9055              : 
    9056              :     /* Prepare WAL data for the new page */
    9057       324625 :     xlrec.new_offnum = ItemPointerGetOffsetNumber(&newtup->t_self);
    9058       324625 :     xlrec.new_xmax = HeapTupleHeaderGetRawXmax(newtup->t_data);
    9059              : 
    9060       324625 :     bufflags = REGBUF_STANDARD;
    9061       324625 :     if (init)
    9062         3724 :         bufflags |= REGBUF_WILL_INIT;
    9063       324625 :     if (need_tuple_data)
    9064        47025 :         bufflags |= REGBUF_KEEP_DATA;
    9065              : 
    9066       324625 :     XLogRegisterBuffer(0, newbuf, bufflags);
    9067       324625 :     if (oldbuf != newbuf)
    9068       146512 :         XLogRegisterBuffer(1, oldbuf, REGBUF_STANDARD);
    9069              : 
    9070       324625 :     XLogRegisterData(&xlrec, SizeOfHeapUpdate);
    9071              : 
    9072              :     /*
    9073              :      * Prepare WAL data for the new tuple.
    9074              :      */
    9075       324625 :     if (prefixlen > 0 || suffixlen > 0)
    9076              :     {
    9077       165050 :         if (prefixlen > 0 && suffixlen > 0)
    9078              :         {
    9079       103998 :             prefix_suffix[0] = prefixlen;
    9080       103998 :             prefix_suffix[1] = suffixlen;
    9081       103998 :             XLogRegisterBufData(0, &prefix_suffix, sizeof(uint16) * 2);
    9082              :         }
    9083        61052 :         else if (prefixlen > 0)
    9084              :         {
    9085        39003 :             XLogRegisterBufData(0, &prefixlen, sizeof(uint16));
    9086              :         }
    9087              :         else
    9088              :         {
    9089        22049 :             XLogRegisterBufData(0, &suffixlen, sizeof(uint16));
    9090              :         }
    9091              :     }
    9092              : 
    9093       324625 :     xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
    9094       324625 :     xlhdr.t_infomask = newtup->t_data->t_infomask;
    9095       324625 :     xlhdr.t_hoff = newtup->t_data->t_hoff;
    9096              :     Assert(SizeofHeapTupleHeader + prefixlen + suffixlen <= newtup->t_len);
    9097              : 
    9098              :     /*
    9099              :      * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
    9100              :      *
    9101              :      * The 'data' doesn't include the common prefix or suffix.
    9102              :      */
    9103       324625 :     XLogRegisterBufData(0, &xlhdr, SizeOfHeapHeader);
    9104       324625 :     if (prefixlen == 0)
    9105              :     {
    9106       181624 :         XLogRegisterBufData(0,
    9107       181624 :                             (char *) newtup->t_data + SizeofHeapTupleHeader,
    9108       181624 :                             newtup->t_len - SizeofHeapTupleHeader - suffixlen);
    9109              :     }
    9110              :     else
    9111              :     {
    9112              :         /*
    9113              :          * Have to write the null bitmap and data after the common prefix as
    9114              :          * two separate rdata entries.
    9115              :          */
    9116              :         /* bitmap [+ padding] [+ oid] */
    9117       143001 :         if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
    9118              :         {
    9119       143001 :             XLogRegisterBufData(0,
    9120       143001 :                                 (char *) newtup->t_data + SizeofHeapTupleHeader,
    9121       143001 :                                 newtup->t_data->t_hoff - SizeofHeapTupleHeader);
    9122              :         }
    9123              : 
    9124              :         /* data after common prefix */
    9125       143001 :         XLogRegisterBufData(0,
    9126       143001 :                             (char *) newtup->t_data + newtup->t_data->t_hoff + prefixlen,
    9127       143001 :                             newtup->t_len - newtup->t_data->t_hoff - prefixlen - suffixlen);
    9128              :     }
    9129              : 
    9130              :     /* We need to log a tuple identity */
    9131       324625 :     if (need_tuple_data && old_key_tuple)
    9132              :     {
    9133              :         /* don't really need this, but its more comfy to decode */
    9134          147 :         xlhdr_idx.t_infomask2 = old_key_tuple->t_data->t_infomask2;
    9135          147 :         xlhdr_idx.t_infomask = old_key_tuple->t_data->t_infomask;
    9136          147 :         xlhdr_idx.t_hoff = old_key_tuple->t_data->t_hoff;
    9137              : 
    9138          147 :         XLogRegisterData(&xlhdr_idx, SizeOfHeapHeader);
    9139              : 
    9140              :         /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
    9141          147 :         XLogRegisterData((char *) old_key_tuple->t_data + SizeofHeapTupleHeader,
    9142          147 :                          old_key_tuple->t_len - SizeofHeapTupleHeader);
    9143              :     }
    9144              : 
    9145              :     /* filtering by origin on a row level is much more efficient */
    9146       324625 :     XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    9147              : 
    9148       324625 :     recptr = XLogInsert(RM_HEAP_ID, info);
    9149              : 
    9150       324625 :     return recptr;
    9151              : }
    9152              : 
    9153              : /*
    9154              :  * Perform XLogInsert of an XLOG_HEAP2_NEW_CID record
    9155              :  *
    9156              :  * This is only used when effective_wal_level is logical, and only for
    9157              :  * catalog tuples.
    9158              :  */
    9159              : static XLogRecPtr
    9160        25088 : log_heap_new_cid(Relation relation, HeapTuple tup)
    9161              : {
    9162              :     xl_heap_new_cid xlrec;
    9163              : 
    9164              :     XLogRecPtr  recptr;
    9165        25088 :     HeapTupleHeader hdr = tup->t_data;
    9166              : 
    9167              :     Assert(ItemPointerIsValid(&tup->t_self));
    9168              :     Assert(tup->t_tableOid != InvalidOid);
    9169              : 
    9170        25088 :     xlrec.top_xid = GetTopTransactionId();
    9171        25088 :     xlrec.target_locator = relation->rd_locator;
    9172        25088 :     xlrec.target_tid = tup->t_self;
    9173              : 
    9174              :     /*
    9175              :      * If the tuple got inserted & deleted in the same TX we definitely have a
    9176              :      * combo CID, set cmin and cmax.
    9177              :      */
    9178        25088 :     if (hdr->t_infomask & HEAP_COMBOCID)
    9179              :     {
    9180              :         Assert(!(hdr->t_infomask & HEAP_XMAX_INVALID));
    9181              :         Assert(!HeapTupleHeaderXminInvalid(hdr));
    9182         2027 :         xlrec.cmin = HeapTupleHeaderGetCmin(hdr);
    9183         2027 :         xlrec.cmax = HeapTupleHeaderGetCmax(hdr);
    9184         2027 :         xlrec.combocid = HeapTupleHeaderGetRawCommandId(hdr);
    9185              :     }
    9186              :     /* No combo CID, so only cmin or cmax can be set by this TX */
    9187              :     else
    9188              :     {
    9189              :         /*
    9190              :          * Tuple inserted.
    9191              :          *
    9192              :          * We need to check for LOCK ONLY because multixacts might be
    9193              :          * transferred to the new tuple in case of FOR KEY SHARE updates in
    9194              :          * which case there will be an xmax, although the tuple just got
    9195              :          * inserted.
    9196              :          */
    9197        30061 :         if (hdr->t_infomask & HEAP_XMAX_INVALID ||
    9198         7000 :             HEAP_XMAX_IS_LOCKED_ONLY(hdr->t_infomask))
    9199              :         {
    9200        16062 :             xlrec.cmin = HeapTupleHeaderGetRawCommandId(hdr);
    9201        16062 :             xlrec.cmax = InvalidCommandId;
    9202              :         }
    9203              :         /* Tuple from a different tx updated or deleted. */
    9204              :         else
    9205              :         {
    9206         6999 :             xlrec.cmin = InvalidCommandId;
    9207         6999 :             xlrec.cmax = HeapTupleHeaderGetRawCommandId(hdr);
    9208              :         }
    9209        23061 :         xlrec.combocid = InvalidCommandId;
    9210              :     }
    9211              : 
    9212              :     /*
    9213              :      * Note that we don't need to register the buffer here, because this
    9214              :      * operation does not modify the page. The insert/update/delete that
    9215              :      * called us certainly did, but that's WAL-logged separately.
    9216              :      */
    9217        25088 :     XLogBeginInsert();
    9218        25088 :     XLogRegisterData(&xlrec, SizeOfHeapNewCid);
    9219              : 
    9220              :     /* will be looked at irrespective of origin */
    9221              : 
    9222        25088 :     recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_NEW_CID);
    9223              : 
    9224        25088 :     return recptr;
    9225              : }
    9226              : 
    9227              : /*
    9228              :  * Build a heap tuple representing the configured REPLICA IDENTITY to represent
    9229              :  * the old tuple in an UPDATE or DELETE.
    9230              :  *
    9231              :  * Returns NULL if there's no need to log an identity or if there's no suitable
    9232              :  * key defined.
    9233              :  *
    9234              :  * Pass key_required true if any replica identity columns changed value, or if
    9235              :  * any of them have any external data.  Delete must always pass true.
    9236              :  *
    9237              :  * *copy is set to true if the returned tuple is a modified copy rather than
    9238              :  * the same tuple that was passed in.
    9239              :  */
    9240              : static HeapTuple
    9241      2181001 : ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
    9242              :                        bool *copy)
    9243              : {
    9244      2181001 :     TupleDesc   desc = RelationGetDescr(relation);
    9245      2181001 :     char        replident = relation->rd_rel->relreplident;
    9246              :     Bitmapset  *idattrs;
    9247              :     HeapTuple   key_tuple;
    9248              :     bool        nulls[MaxHeapAttributeNumber];
    9249              :     Datum       values[MaxHeapAttributeNumber];
    9250              : 
    9251      2181001 :     *copy = false;
    9252              : 
    9253      2181001 :     if (!RelationIsLogicallyLogged(relation))
    9254      2080708 :         return NULL;
    9255              : 
    9256       100293 :     if (replident == REPLICA_IDENTITY_NOTHING)
    9257          228 :         return NULL;
    9258              : 
    9259       100065 :     if (replident == REPLICA_IDENTITY_FULL)
    9260              :     {
    9261              :         /*
    9262              :          * When logging the entire old tuple, it very well could contain
    9263              :          * toasted columns. If so, force them to be inlined.
    9264              :          */
    9265          197 :         if (HeapTupleHasExternal(tp))
    9266              :         {
    9267            4 :             *copy = true;
    9268            4 :             tp = toast_flatten_tuple(tp, desc);
    9269              :         }
    9270          197 :         return tp;
    9271              :     }
    9272              : 
    9273              :     /* if the key isn't required and we're only logging the key, we're done */
    9274        99868 :     if (!key_required)
    9275        46878 :         return NULL;
    9276              : 
    9277              :     /* find out the replica identity columns */
    9278        52990 :     idattrs = RelationGetIndexAttrBitmap(relation,
    9279              :                                          INDEX_ATTR_BITMAP_IDENTITY_KEY);
    9280              : 
    9281              :     /*
    9282              :      * If there's no defined replica identity columns, treat as !key_required.
    9283              :      * (This case should not be reachable from heap_update, since that should
    9284              :      * calculate key_required accurately.  But heap_delete just passes
    9285              :      * constant true for key_required, so we can hit this case in deletes.)
    9286              :      */
    9287        52990 :     if (bms_is_empty(idattrs))
    9288         6021 :         return NULL;
    9289              : 
    9290              :     /*
    9291              :      * Construct a new tuple containing only the replica identity columns,
    9292              :      * with nulls elsewhere.  While we're at it, assert that the replica
    9293              :      * identity columns aren't null.
    9294              :      */
    9295        46969 :     heap_deform_tuple(tp, desc, values, nulls);
    9296              : 
    9297       150902 :     for (int i = 0; i < desc->natts; i++)
    9298              :     {
    9299       103933 :         if (bms_is_member(i + 1 - FirstLowInvalidHeapAttributeNumber,
    9300              :                           idattrs))
    9301              :             Assert(!nulls[i]);
    9302              :         else
    9303        56952 :             nulls[i] = true;
    9304              :     }
    9305              : 
    9306        46969 :     key_tuple = heap_form_tuple(desc, values, nulls);
    9307        46969 :     *copy = true;
    9308              : 
    9309        46969 :     bms_free(idattrs);
    9310              : 
    9311              :     /*
    9312              :      * If the tuple, which by here only contains indexed columns, still has
    9313              :      * toasted columns, force them to be inlined. This is somewhat unlikely
    9314              :      * since there's limits on the size of indexed columns, so we don't
    9315              :      * duplicate toast_flatten_tuple()s functionality in the above loop over
    9316              :      * the indexed columns, even if it would be more efficient.
    9317              :      */
    9318        46969 :     if (HeapTupleHasExternal(key_tuple))
    9319              :     {
    9320            4 :         HeapTuple   oldtup = key_tuple;
    9321              : 
    9322            4 :         key_tuple = toast_flatten_tuple(oldtup, desc);
    9323            4 :         heap_freetuple(oldtup);
    9324              :     }
    9325              : 
    9326        46969 :     return key_tuple;
    9327              : }
    9328              : 
    9329              : /*
    9330              :  * HeapCheckForSerializableConflictOut
    9331              :  *      We are reading a tuple.  If it's not visible, there may be a
    9332              :  *      rw-conflict out with the inserter.  Otherwise, if it is visible to us
    9333              :  *      but has been deleted, there may be a rw-conflict out with the deleter.
    9334              :  *
    9335              :  * We will determine the top level xid of the writing transaction with which
    9336              :  * we may be in conflict, and ask CheckForSerializableConflictOut() to check
    9337              :  * for overlap with our own transaction.
    9338              :  *
    9339              :  * This function should be called just about anywhere in heapam.c where a
    9340              :  * tuple has been read. The caller must hold at least a shared lock on the
    9341              :  * buffer, because this function might set hint bits on the tuple. There is
    9342              :  * currently no known reason to call this function from an index AM.
    9343              :  */
    9344              : void
    9345     39809497 : HeapCheckForSerializableConflictOut(bool visible, Relation relation,
    9346              :                                     HeapTuple tuple, Buffer buffer,
    9347              :                                     Snapshot snapshot)
    9348              : {
    9349              :     TransactionId xid;
    9350              :     HTSV_Result htsvResult;
    9351              : 
    9352     39809497 :     if (!CheckForSerializableConflictOutNeeded(relation, snapshot))
    9353     39784089 :         return;
    9354              : 
    9355              :     /*
    9356              :      * Check to see whether the tuple has been written to by a concurrent
    9357              :      * transaction, either to create it not visible to us, or to delete it
    9358              :      * while it is visible to us.  The "visible" bool indicates whether the
    9359              :      * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
    9360              :      * is going on with it.
    9361              :      *
    9362              :      * In the event of a concurrently inserted tuple that also happens to have
    9363              :      * been concurrently updated (by a separate transaction), the xmin of the
    9364              :      * tuple will be used -- not the updater's xid.
    9365              :      */
    9366        25408 :     htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
    9367        25408 :     switch (htsvResult)
    9368              :     {
    9369        24610 :         case HEAPTUPLE_LIVE:
    9370        24610 :             if (visible)
    9371        24597 :                 return;
    9372           13 :             xid = HeapTupleHeaderGetXmin(tuple->t_data);
    9373           13 :             break;
    9374          361 :         case HEAPTUPLE_RECENTLY_DEAD:
    9375              :         case HEAPTUPLE_DELETE_IN_PROGRESS:
    9376          361 :             if (visible)
    9377          286 :                 xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
    9378              :             else
    9379           75 :                 xid = HeapTupleHeaderGetXmin(tuple->t_data);
    9380              : 
    9381          361 :             if (TransactionIdPrecedes(xid, TransactionXmin))
    9382              :             {
    9383              :                 /* This is like the HEAPTUPLE_DEAD case */
    9384              :                 Assert(!visible);
    9385           70 :                 return;
    9386              :             }
    9387          291 :             break;
    9388          327 :         case HEAPTUPLE_INSERT_IN_PROGRESS:
    9389          327 :             xid = HeapTupleHeaderGetXmin(tuple->t_data);
    9390          327 :             break;
    9391          110 :         case HEAPTUPLE_DEAD:
    9392              :             Assert(!visible);
    9393          110 :             return;
    9394            0 :         default:
    9395              : 
    9396              :             /*
    9397              :              * The only way to get to this default clause is if a new value is
    9398              :              * added to the enum type without adding it to this switch
    9399              :              * statement.  That's a bug, so elog.
    9400              :              */
    9401            0 :             elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
    9402              : 
    9403              :             /*
    9404              :              * In spite of having all enum values covered and calling elog on
    9405              :              * this default, some compilers think this is a code path which
    9406              :              * allows xid to be used below without initialization. Silence
    9407              :              * that warning.
    9408              :              */
    9409              :             xid = InvalidTransactionId;
    9410              :     }
    9411              : 
    9412              :     Assert(TransactionIdIsValid(xid));
    9413              :     Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
    9414              : 
    9415              :     /*
    9416              :      * Find top level xid.  Bail out if xid is too early to be a conflict, or
    9417              :      * if it's our own xid.
    9418              :      */
    9419          631 :     if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
    9420           64 :         return;
    9421          567 :     xid = SubTransGetTopmostTransaction(xid);
    9422          567 :     if (TransactionIdPrecedes(xid, TransactionXmin))
    9423            0 :         return;
    9424              : 
    9425          567 :     CheckForSerializableConflictOut(relation, xid, snapshot);
    9426              : }
        

Generated by: LCOV version 2.0-1