LCOV - code coverage report
Current view: top level - src/backend/access/heap - heapam.c (source / functions)
Test:         PostgreSQL 17devel
Date:         2024-04-25 06:13:26
Coverage:     Lines:     2853 of 3165 hit (90.1 %)
              Functions:   91 of   93 hit (97.8 %)

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * heapam.c
       4             :  *    heap access method code
       5             :  *
       6             :  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/access/heap/heapam.c
      12             :  *
      13             :  *
      14             :  * INTERFACE ROUTINES
      15             :  *      heap_beginscan  - begin relation scan
      16             :  *      heap_rescan     - restart a relation scan
      17             :  *      heap_endscan    - end relation scan
      18             :  *      heap_getnext    - retrieve next tuple in scan
      19             :  *      heap_fetch      - retrieve tuple with given tid
      20             :  *      heap_insert     - insert tuple into a relation
      21             :  *      heap_multi_insert - insert multiple tuples into a relation
      22             :  *      heap_delete     - delete a tuple from a relation
      23             :  *      heap_update     - replace a tuple in a relation with another tuple
      24             :  *
      25             :  * NOTES
      26             :  *    This file contains the heap_ routines which implement
      27             :  *    the POSTGRES heap access method used for all POSTGRES
      28             :  *    relations.
      29             :  *
      30             :  *-------------------------------------------------------------------------
      31             :  */
      32             : #include "postgres.h"
      33             : 
      34             : #include "access/bufmask.h"
      35             : #include "access/heapam.h"
      36             : #include "access/heapam_xlog.h"
      37             : #include "access/heaptoast.h"
      38             : #include "access/hio.h"
      39             : #include "access/multixact.h"
      40             : #include "access/parallel.h"
      41             : #include "access/relscan.h"
      42             : #include "access/subtrans.h"
      43             : #include "access/syncscan.h"
      44             : #include "access/sysattr.h"
      45             : #include "access/tableam.h"
      46             : #include "access/transam.h"
      47             : #include "access/valid.h"
      48             : #include "access/visibilitymap.h"
      49             : #include "access/xact.h"
      50             : #include "access/xlog.h"
      51             : #include "access/xloginsert.h"
      52             : #include "access/xlogutils.h"
      53             : #include "catalog/catalog.h"
      54             : #include "commands/vacuum.h"
      55             : #include "miscadmin.h"
      56             : #include "pgstat.h"
      57             : #include "port/atomics.h"
      58             : #include "port/pg_bitutils.h"
      59             : #include "storage/bufmgr.h"
      60             : #include "storage/freespace.h"
      61             : #include "storage/lmgr.h"
      62             : #include "storage/predicate.h"
      63             : #include "storage/procarray.h"
      64             : #include "storage/standby.h"
      65             : #include "utils/datum.h"
      66             : #include "utils/inval.h"
      67             : #include "utils/relcache.h"
      68             : #include "utils/snapmgr.h"
      69             : #include "utils/spccache.h"
      70             : 
      71             : 
      72             : static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
      73             :                                      TransactionId xid, CommandId cid, int options);
      74             : static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
      75             :                                   Buffer newbuf, HeapTuple oldtup,
      76             :                                   HeapTuple newtup, HeapTuple old_key_tuple,
      77             :                                   bool all_visible_cleared, bool new_all_visible_cleared);
      78             : static Bitmapset *HeapDetermineColumnsInfo(Relation relation,
      79             :                                            Bitmapset *interesting_cols,
      80             :                                            Bitmapset *external_cols,
      81             :                                            HeapTuple oldtup, HeapTuple newtup,
      82             :                                            bool *has_external);
      83             : static bool heap_acquire_tuplock(Relation relation, ItemPointer tid,
      84             :                                  LockTupleMode mode, LockWaitPolicy wait_policy,
      85             :                                  bool *have_tuple_lock);
      86             : static inline BlockNumber heapgettup_advance_block(HeapScanDesc scan,
      87             :                                                    BlockNumber block,
      88             :                                                    ScanDirection dir);
      89             : static pg_noinline BlockNumber heapgettup_initial_block(HeapScanDesc scan,
      90             :                                                         ScanDirection dir);
      91             : static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
      92             :                                       uint16 old_infomask2, TransactionId add_to_xmax,
      93             :                                       LockTupleMode mode, bool is_update,
      94             :                                       TransactionId *result_xmax, uint16 *result_infomask,
      95             :                                       uint16 *result_infomask2);
      96             : static TM_Result heap_lock_updated_tuple(Relation rel, HeapTuple tuple,
      97             :                                          ItemPointer ctid, TransactionId xid,
      98             :                                          LockTupleMode mode);
      99             : static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
     100             :                                    uint16 *new_infomask2);
     101             : static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
     102             :                                              uint16 t_infomask);
     103             : static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
     104             :                                     LockTupleMode lockmode, bool *current_is_member);
     105             : static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
     106             :                             Relation rel, ItemPointer ctid, XLTW_Oper oper,
     107             :                             int *remaining);
     108             : static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
     109             :                                        uint16 infomask, Relation rel, int *remaining);
     110             : static void index_delete_sort(TM_IndexDeleteOp *delstate);
     111             : static int  bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate);
     112             : static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
     113             : static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
     114             :                                         bool *copy);
     115             : 
     116             : 
     117             : /*
     118             :  * Each tuple lock mode has a corresponding heavyweight lock, and one or two
     119             :  * corresponding MultiXactStatuses (one to merely lock tuples, another one to
     120             :  * update them).  This table (and the macros below) helps us determine the
     121             :  * heavyweight lock mode and MultiXactStatus values to use for any particular
     122             :  * tuple lock strength.
     123             :  *
     124             :  * Don't look at lockstatus/updstatus directly!  Use get_mxact_status_for_lock
     125             :  * instead.
     126             :  */
     127             : static const struct
     128             : {
     129             :     LOCKMODE    hwlock;
     130             :     int         lockstatus;
     131             :     int         updstatus;
     132             : }
     133             : 
     134             :             tupleLockExtraInfo[MaxLockTupleMode + 1] =
     135             : {
     136             :     {                           /* LockTupleKeyShare */
     137             :         AccessShareLock,
     138             :         MultiXactStatusForKeyShare,
     139             :         -1                      /* KeyShare does not allow updating tuples */
     140             :     },
     141             :     {                           /* LockTupleShare */
     142             :         RowShareLock,
     143             :         MultiXactStatusForShare,
     144             :         -1                      /* Share does not allow updating tuples */
     145             :     },
     146             :     {                           /* LockTupleNoKeyExclusive */
     147             :         ExclusiveLock,
     148             :         MultiXactStatusForNoKeyUpdate,
     149             :         MultiXactStatusNoKeyUpdate
     150             :     },
     151             :     {                           /* LockTupleExclusive */
     152             :         AccessExclusiveLock,
     153             :         MultiXactStatusForUpdate,
     154             :         MultiXactStatusUpdate
     155             :     }
     156             : };
     157             : 
     158             : /* Get the LOCKMODE for a given MultiXactStatus */
     159             : #define LOCKMODE_from_mxstatus(status) \
     160             :             (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
     161             : 
     162             : /*
     163             :  * Acquire heavyweight locks on tuples, using a LockTupleMode strength value.
     164             :  * This is more readable than having every caller translate it to lock.h's
     165             :  * LOCKMODE.
     166             :  */
     167             : #define LockTupleTuplock(rel, tup, mode) \
     168             :     LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
     169             : #define UnlockTupleTuplock(rel, tup, mode) \
     170             :     UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
     171             : #define ConditionalLockTupleTuplock(rel, tup, mode) \
     172             :     ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
     173             : 
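
(Illustrative sketch, not part of the covered source: how the wrapper macros above are typically used. 'rel' and 'tid' are assumed to be a valid Relation and tuple ItemPointer; LockTupleExclusive maps to AccessExclusiveLock through tupleLockExtraInfo.)

    LockTupleTuplock(rel, tid, LockTupleExclusive);    /* acquires AccessExclusiveLock on the tuple */
    /* ... inspect or modify the tuple ... */
    UnlockTupleTuplock(rel, tid, LockTupleExclusive);  /* releases it again */
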
     174             : #ifdef USE_PREFETCH
     175             : /*
     176             :  * heap_index_delete_tuples and index_delete_prefetch_buffer use this
     177             :  * structure to coordinate prefetching activity
     178             :  */
     179             : typedef struct
     180             : {
     181             :     BlockNumber cur_hblkno;
     182             :     int         next_item;
     183             :     int         ndeltids;
     184             :     TM_IndexDelete *deltids;
     185             : } IndexDeletePrefetchState;
     186             : #endif
     187             : 
     188             : /* heap_index_delete_tuples bottom-up index deletion costing constants */
     189             : #define BOTTOMUP_MAX_NBLOCKS            6
     190             : #define BOTTOMUP_TOLERANCE_NBLOCKS      3
     191             : 
     192             : /*
     193             :  * heap_index_delete_tuples uses this when determining which heap blocks it
     194             :  * must visit to help its bottom-up index deletion caller
     195             :  */
     196             : typedef struct IndexDeleteCounts
     197             : {
     198             :     int16       npromisingtids; /* Number of "promising" TIDs in group */
     199             :     int16       ntids;          /* Number of TIDs in group */
     200             :     int16       ifirsttid;      /* Offset to group's first deltid */
     201             : } IndexDeleteCounts;
     202             : 
     203             : /*
     204             :  * This table maps tuple lock strength values for each particular
     205             :  * MultiXactStatus value.
     206             :  */
     207             : static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
     208             : {
     209             :     LockTupleKeyShare,          /* ForKeyShare */
     210             :     LockTupleShare,             /* ForShare */
     211             :     LockTupleNoKeyExclusive,    /* ForNoKeyUpdate */
     212             :     LockTupleExclusive,         /* ForUpdate */
     213             :     LockTupleNoKeyExclusive,    /* NoKeyUpdate */
     214             :     LockTupleExclusive          /* Update */
     215             : };
     216             : 
     217             : /* Get the LockTupleMode for a given MultiXactStatus */
     218             : #define TUPLOCK_from_mxstatus(status) \
     219             :             (MultiXactStatusLock[(status)])
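
(Editor's illustration, not part of the covered source: combining the two lookup tables, a MultiXactStatus maps back to a tuple lock strength and from there to the heavyweight lock mode that conflicts with it.)

    LockTupleMode tmode  = TUPLOCK_from_mxstatus(MultiXactStatusForShare);   /* LockTupleShare */
    LOCKMODE      hwlock = LOCKMODE_from_mxstatus(MultiXactStatusForShare);  /* RowShareLock */
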
     220             : 
     221             : /* ----------------------------------------------------------------
     222             :  *                       heap support routines
     223             :  * ----------------------------------------------------------------
     224             :  */
     225             : 
     226             : /*
     227             :  * Streaming read API callback for parallel sequential scans. Returns the next
     228             :  * block the caller wants from the read stream or InvalidBlockNumber when done.
     229             :  */
     230             : static BlockNumber
     231      200934 : heap_scan_stream_read_next_parallel(ReadStream *stream,
     232             :                                     void *callback_private_data,
     233             :                                     void *per_buffer_data)
     234             : {
     235      200934 :     HeapScanDesc scan = (HeapScanDesc) callback_private_data;
     236             : 
     237             :     Assert(ScanDirectionIsForward(scan->rs_dir));
     238             :     Assert(scan->rs_base.rs_parallel);
     239             : 
     240      200934 :     if (unlikely(!scan->rs_inited))
     241             :     {
     242             :         /* parallel scan */
     243        2764 :         table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
     244        2764 :                                                  scan->rs_parallelworkerdata,
     245        2764 :                                                  (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel);
     246             : 
     247             :         /* may return InvalidBlockNumber if there are no more blocks */
     248        5528 :         scan->rs_prefetch_block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
     249        2764 :                                                                     scan->rs_parallelworkerdata,
     250        2764 :                                                                     (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel);
     251        2764 :         scan->rs_inited = true;
     252             :     }
     253             :     else
     254             :     {
     255      198170 :         scan->rs_prefetch_block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
     256      198170 :                                                                     scan->rs_parallelworkerdata, (ParallelBlockTableScanDesc)
     257      198170 :                                                                     scan->rs_base.rs_parallel);
     258             :     }
     259             : 
     260      200934 :     return scan->rs_prefetch_block;
     261             : }
     262             : 
     263             : /*
     264             :  * Streaming read API callback for serial sequential and TID range scans.
     265             :  * Returns the next block the caller wants from the read stream or
     266             :  * InvalidBlockNumber when done.
     267             :  */
     268             : static BlockNumber
     269     6091668 : heap_scan_stream_read_next_serial(ReadStream *stream,
     270             :                                   void *callback_private_data,
     271             :                                   void *per_buffer_data)
     272             : {
     273     6091668 :     HeapScanDesc scan = (HeapScanDesc) callback_private_data;
     274             : 
     275     6091668 :     if (unlikely(!scan->rs_inited))
     276             :     {
     277     1547898 :         scan->rs_prefetch_block = heapgettup_initial_block(scan, scan->rs_dir);
     278     1547898 :         scan->rs_inited = true;
     279             :     }
     280             :     else
     281     4543770 :         scan->rs_prefetch_block = heapgettup_advance_block(scan,
     282             :                                                            scan->rs_prefetch_block,
     283             :                                                            scan->rs_dir);
     284             : 
     285     6091668 :     return scan->rs_prefetch_block;
     286             : }
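
(Context sketch, not part of the covered source: the two callbacks above are installed when the scan's read stream is created, roughly as in heap_beginscan(). A minimal sketch, assuming the PostgreSQL 17 read_stream_begin_relation() API and a HeapScanDesc named 'scan'.)

    ReadStreamBlockNumberCB cb;

    /* pick the parallel or serial block-number callback */
    cb = (scan->rs_base.rs_parallel != NULL)
        ? heap_scan_stream_read_next_parallel
        : heap_scan_stream_read_next_serial;

    scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_SEQUENTIAL,
                                                      scan->rs_strategy,
                                                      scan->rs_base.rs_rd,
                                                      MAIN_FORKNUM,
                                                      cb,
                                                      scan,   /* callback_private_data */
                                                      0);     /* no per-buffer data */
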
     287             : 
     288             : /* ----------------
     289             :  *      initscan - scan code common to heap_beginscan and heap_rescan
     290             :  * ----------------
     291             :  */
     292             : static void
     293     1587868 : initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
     294             : {
     295     1587868 :     ParallelBlockTableScanDesc bpscan = NULL;
     296             :     bool        allow_strat;
     297             :     bool        allow_sync;
     298             : 
     299             :     /*
     300             :      * Determine the number of blocks we have to scan.
     301             :      *
     302             :      * It is sufficient to do this once at scan start, since any tuples added
     303             :      * while the scan is in progress will be invisible to my snapshot anyway.
     304             :      * (That is not true when using a non-MVCC snapshot.  However, we couldn't
     305             :      * guarantee to return tuples added after scan start anyway, since they
     306             :      * might go into pages we already scanned.  To guarantee consistent
     307             :      * results for a non-MVCC snapshot, the caller must hold some higher-level
     308             :      * lock that ensures the interesting tuple(s) won't change.)
     309             :      */
     310     1587868 :     if (scan->rs_base.rs_parallel != NULL)
     311             :     {
     312        3964 :         bpscan = (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
     313        3964 :         scan->rs_nblocks = bpscan->phs_nblocks;
     314             :     }
     315             :     else
     316     1583904 :         scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_base.rs_rd);
     317             : 
     318             :     /*
     319             :      * If the table is large relative to NBuffers, use a bulk-read access
     320             :      * strategy and enable synchronized scanning (see syncscan.c).  Although
     321             :      * the thresholds for these features could be different, we make them the
     322             :      * same so that there are only two behaviors to tune rather than four.
     323             :      * (However, some callers need to be able to disable one or both of these
     324             :      * behaviors, independently of the size of the table; also there is a GUC
     325             :      * variable that can disable synchronized scanning.)
     326             :      *
     327             :      * Note that table_block_parallelscan_initialize has a very similar test;
     328             :      * if you change this, consider changing that one, too.
     329             :      */
     330     1587864 :     if (!RelationUsesLocalBuffers(scan->rs_base.rs_rd) &&
     331     1576118 :         scan->rs_nblocks > NBuffers / 4)
     332             :     {
     333       21280 :         allow_strat = (scan->rs_base.rs_flags & SO_ALLOW_STRAT) != 0;
     334       21280 :         allow_sync = (scan->rs_base.rs_flags & SO_ALLOW_SYNC) != 0;
     335             :     }
     336             :     else
     337     1566584 :         allow_strat = allow_sync = false;
     338             : 
     339     1587864 :     if (allow_strat)
     340             :     {
     341             :         /* During a rescan, keep the previous strategy object. */
     342       18794 :         if (scan->rs_strategy == NULL)
     343       18592 :             scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
     344             :     }
     345             :     else
     346             :     {
     347     1569070 :         if (scan->rs_strategy != NULL)
     348           0 :             FreeAccessStrategy(scan->rs_strategy);
     349     1569070 :         scan->rs_strategy = NULL;
     350             :     }
     351             : 
     352     1587864 :     if (scan->rs_base.rs_parallel != NULL)
     353             :     {
     354             :         /* For parallel scan, believe whatever ParallelTableScanDesc says. */
     355        3964 :         if (scan->rs_base.rs_parallel->phs_syncscan)
     356           4 :             scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
     357             :         else
     358        3960 :             scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
     359             :     }
     360     1583900 :     else if (keep_startblock)
     361             :     {
     362             :         /*
     363             :          * When rescanning, we want to keep the previous startblock setting,
     364             :          * so that rewinding a cursor doesn't generate surprising results.
     365             :          * Reset the active syncscan setting, though.
     366             :          */
     367      980620 :         if (allow_sync && synchronize_seqscans)
     368          40 :             scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
     369             :         else
     370      980580 :             scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
     371             :     }
     372      603280 :     else if (allow_sync && synchronize_seqscans)
     373             :     {
     374         118 :         scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
     375         118 :         scan->rs_startblock = ss_get_location(scan->rs_base.rs_rd, scan->rs_nblocks);
     376             :     }
     377             :     else
     378             :     {
     379      603162 :         scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
     380      603162 :         scan->rs_startblock = 0;
     381             :     }
     382             : 
     383     1587864 :     scan->rs_numblocks = InvalidBlockNumber;
     384     1587864 :     scan->rs_inited = false;
     385     1587864 :     scan->rs_ctup.t_data = NULL;
     386     1587864 :     ItemPointerSetInvalid(&scan->rs_ctup.t_self);
     387     1587864 :     scan->rs_cbuf = InvalidBuffer;
     388     1587864 :     scan->rs_cblock = InvalidBlockNumber;
     389             : 
     390             :     /*
     391             :      * Initialize to ForwardScanDirection because it is most common and
     392             :      * because heap scans go forward before going backward (e.g. CURSORs).
     393             :      */
     394     1587864 :     scan->rs_dir = ForwardScanDirection;
     395     1587864 :     scan->rs_prefetch_block = InvalidBlockNumber;
     396             : 
     397             :     /* page-at-a-time fields are always invalid when not rs_inited */
     398             : 
     399             :     /*
     400             :      * copy the scan key, if appropriate
     401             :      */
     402     1587864 :     if (key != NULL && scan->rs_base.rs_nkeys > 0)
     403      339994 :         memcpy(scan->rs_base.rs_key, key, scan->rs_base.rs_nkeys * sizeof(ScanKeyData));
     404             : 
     405             :     /*
     406             :      * Currently, we only have a stats counter for sequential heap scans (but
      407             :      * e.g. for bitmap scans the underlying bitmap index scans will be counted,
     408             :      * and for sample scans we update stats for tuple fetches).
     409             :      */
     410     1587864 :     if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN)
     411     1552688 :         pgstat_count_heap_scan(scan->rs_base.rs_rd);
     412     1587864 : }
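
(Worked example for the NBuffers / 4 threshold above, editor's note: with the default shared_buffers of 128 MB, NBuffers is 16384 buffers of 8 kB, so any table larger than 4096 blocks, roughly 32 MB, is read with the BAS_BULKREAD strategy and becomes eligible for synchronized scanning, subject to the SO_ALLOW_STRAT / SO_ALLOW_SYNC flags and the synchronize_seqscans GUC.)
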
     413             : 
     414             : /*
     415             :  * heap_setscanlimits - restrict range of a heapscan
     416             :  *
     417             :  * startBlk is the page to start at
      418             :  * numBlks is the number of pages to scan (InvalidBlockNumber means "all")
     419             :  */
     420             : void
     421        3748 : heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
     422             : {
     423        3748 :     HeapScanDesc scan = (HeapScanDesc) sscan;
     424             : 
     425             :     Assert(!scan->rs_inited);    /* else too late to change */
     426             :     /* else rs_startblock is significant */
     427             :     Assert(!(scan->rs_base.rs_flags & SO_ALLOW_SYNC));
     428             : 
     429             :     /* Check startBlk is valid (but allow case of zero blocks...) */
     430             :     Assert(startBlk == 0 || startBlk < scan->rs_nblocks);
     431             : 
     432        3748 :     scan->rs_startblock = startBlk;
     433        3748 :     scan->rs_numblocks = numBlks;
     434        3748 : }
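
(Illustrative sketch, not part of the covered source: a hypothetical caller restricting a freshly begun, non-synchronized heap scan to ten blocks starting at block 100, before any tuple has been fetched.)

    heap_setscanlimits(sscan, 100, 10);    /* scan only blocks 100..109 */
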
     435             : 
     436             : /*
     437             :  * Per-tuple loop for heap_prepare_pagescan(). Pulled out so it can be called
     438             :  * multiple times, with constant arguments for all_visible,
     439             :  * check_serializable.
     440             :  */
     441             : pg_attribute_always_inline
     442             : static int
     443     3861078 : page_collect_tuples(HeapScanDesc scan, Snapshot snapshot,
     444             :                     Page page, Buffer buffer,
     445             :                     BlockNumber block, int lines,
     446             :                     bool all_visible, bool check_serializable)
     447             : {
     448     3861078 :     int         ntup = 0;
     449             :     OffsetNumber lineoff;
     450             : 
     451   198110816 :     for (lineoff = FirstOffsetNumber; lineoff <= lines; lineoff++)
     452             :     {
     453   194249754 :         ItemId      lpp = PageGetItemId(page, lineoff);
     454             :         HeapTupleData loctup;
     455             :         bool        valid;
     456             : 
     457   194249754 :         if (!ItemIdIsNormal(lpp))
     458    36096818 :             continue;
     459             : 
     460   158152936 :         loctup.t_data = (HeapTupleHeader) PageGetItem(page, lpp);
     461   158152936 :         loctup.t_len = ItemIdGetLength(lpp);
     462   158152936 :         loctup.t_tableOid = RelationGetRelid(scan->rs_base.rs_rd);
     463   158152936 :         ItemPointerSet(&(loctup.t_self), block, lineoff);
     464             : 
     465   158152936 :         if (all_visible)
     466    52151238 :             valid = true;
     467             :         else
     468   106001698 :             valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
     469             : 
     470   158152936 :         if (check_serializable)
     471        2834 :             HeapCheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
     472             :                                                 &loctup, buffer, snapshot);
     473             : 
     474   158152920 :         if (valid)
     475             :         {
     476   145908068 :             scan->rs_vistuples[ntup] = lineoff;
     477   145908068 :             ntup++;
     478             :         }
     479             :     }
     480             : 
     481             :     Assert(ntup <= MaxHeapTuplesPerPage);
     482             : 
     483     3861062 :     return ntup;
     484             : }
     485             : 
     486             : /*
     487             :  * heap_prepare_pagescan - Prepare current scan page to be scanned in pagemode
     488             :  *
      489             :  * Preparation currently consists of 1. pruning the scan's rs_cbuf page, and
      490             :  * 2. filling the rs_vistuples[] array with the OffsetNumbers of visible tuples.
     491             :  */
     492             : void
     493     3861078 : heap_prepare_pagescan(TableScanDesc sscan)
     494             : {
     495     3861078 :     HeapScanDesc scan = (HeapScanDesc) sscan;
     496     3861078 :     Buffer      buffer = scan->rs_cbuf;
     497     3861078 :     BlockNumber block = scan->rs_cblock;
     498             :     Snapshot    snapshot;
     499             :     Page        page;
     500             :     int         lines;
     501             :     bool        all_visible;
     502             :     bool        check_serializable;
     503             : 
     504             :     Assert(BufferGetBlockNumber(buffer) == block);
     505             : 
     506             :     /* ensure we're not accidentally being used when not in pagemode */
     507             :     Assert(scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE);
     508     3861078 :     snapshot = scan->rs_base.rs_snapshot;
     509             : 
     510             :     /*
     511             :      * Prune and repair fragmentation for the whole page, if possible.
     512             :      */
     513     3861078 :     heap_page_prune_opt(scan->rs_base.rs_rd, buffer);
     514             : 
     515             :     /*
     516             :      * We must hold share lock on the buffer content while examining tuple
     517             :      * visibility.  Afterwards, however, the tuples we have found to be
     518             :      * visible are guaranteed good as long as we hold the buffer pin.
     519             :      */
     520     3861078 :     LockBuffer(buffer, BUFFER_LOCK_SHARE);
     521             : 
     522     3861078 :     page = BufferGetPage(buffer);
     523     3861078 :     lines = PageGetMaxOffsetNumber(page);
     524             : 
     525             :     /*
     526             :      * If the all-visible flag indicates that all tuples on the page are
     527             :      * visible to everyone, we can skip the per-tuple visibility tests.
     528             :      *
     529             :      * Note: In hot standby, a tuple that's already visible to all
     530             :      * transactions on the primary might still be invisible to a read-only
     531             :      * transaction in the standby. We partly handle this problem by tracking
     532             :      * the minimum xmin of visible tuples as the cut-off XID while marking a
     533             :      * page all-visible on the primary and WAL log that along with the
     534             :      * visibility map SET operation. In hot standby, we wait for (or abort)
      535             :      * all transactions that potentially may not see one or more tuples on
     536             :      * the page. That's how index-only scans work fine in hot standby. A
     537             :      * crucial difference between index-only scans and heap scans is that the
      538             :      * index-only scan completely relies on the visibility map, whereas a heap
     539             :      * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
     540             :      * the page-level flag can be trusted in the same way, because it might
     541             :      * get propagated somehow without being explicitly WAL-logged, e.g. via a
     542             :      * full page write. Until we can prove that beyond doubt, let's check each
     543             :      * tuple for visibility the hard way.
     544             :      */
     545     3861078 :     all_visible = PageIsAllVisible(page) && !snapshot->takenDuringRecovery;
     546             :     check_serializable =
     547     3861078 :         CheckForSerializableConflictOutNeeded(scan->rs_base.rs_rd, snapshot);
     548             : 
     549             :     /*
     550             :      * We call page_collect_tuples() with constant arguments, to get the
     551             :      * compiler to constant fold the constant arguments. Separate calls with
     552             :      * constant arguments, rather than variables, are needed on several
     553             :      * compilers to actually perform constant folding.
     554             :      */
     555     3861078 :     if (likely(all_visible))
     556             :     {
     557     1194822 :         if (likely(!check_serializable))
     558     1194822 :             scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
     559             :                                                    block, lines, true, false);
     560             :         else
     561           0 :             scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
     562             :                                                    block, lines, true, true);
     563             :     }
     564             :     else
     565             :     {
     566     2666256 :         if (likely(!check_serializable))
     567     2665000 :             scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
     568             :                                                    block, lines, false, false);
     569             :         else
     570        1256 :             scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
     571             :                                                    block, lines, false, true);
     572             :     }
     573             : 
     574     3861062 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
     575     3861062 : }
     576             : 
     577             : /*
     578             :  * heap_fetch_next_buffer - read and pin the next block from MAIN_FORKNUM.
     579             :  *
     580             :  * Read the next block of the scan relation from the read stream and save it
     581             :  * in the scan descriptor.  It is already pinned.
     582             :  */
     583             : static inline void
     584     5342954 : heap_fetch_next_buffer(HeapScanDesc scan, ScanDirection dir)
     585             : {
     586             :     Assert(scan->rs_read_stream);
     587             : 
     588             :     /* release previous scan buffer, if any */
     589     5342954 :     if (BufferIsValid(scan->rs_cbuf))
     590             :     {
     591     3792286 :         ReleaseBuffer(scan->rs_cbuf);
     592     3792286 :         scan->rs_cbuf = InvalidBuffer;
     593             :     }
     594             : 
     595             :     /*
     596             :      * Be sure to check for interrupts at least once per page.  Checks at
     597             :      * higher code levels won't be able to stop a seqscan that encounters many
     598             :      * pages' worth of consecutive dead tuples.
     599             :      */
     600     5342954 :     CHECK_FOR_INTERRUPTS();
     601             : 
     602             :     /*
     603             :      * If the scan direction is changing, reset the prefetch block to the
     604             :      * current block. Otherwise, we will incorrectly prefetch the blocks
     605             :      * between the prefetch block and the current block again before
     606             :      * prefetching blocks in the new, correct scan direction.
     607             :      */
     608     5342944 :     if (unlikely(scan->rs_dir != dir))
     609             :     {
     610         154 :         scan->rs_prefetch_block = scan->rs_cblock;
     611         154 :         read_stream_reset(scan->rs_read_stream);
     612             :     }
     613             : 
     614     5342944 :     scan->rs_dir = dir;
     615             : 
     616     5342944 :     scan->rs_cbuf = read_stream_next_buffer(scan->rs_read_stream, NULL);
     617     5342944 :     if (BufferIsValid(scan->rs_cbuf))
     618     4036380 :         scan->rs_cblock = BufferGetBlockNumber(scan->rs_cbuf);
     619     5342944 : }
     620             : 
     621             : /*
     622             :  * heapgettup_initial_block - return the first BlockNumber to scan
     623             :  *
     624             :  * Returns InvalidBlockNumber when there are no blocks to scan.  This can
     625             :  * occur with empty tables and in parallel scans when parallel workers get all
      626             :  * of the pages before we get a chance to claim our first page.
     627             :  */
     628             : static pg_noinline BlockNumber
     629     1547898 : heapgettup_initial_block(HeapScanDesc scan, ScanDirection dir)
     630             : {
     631             :     Assert(!scan->rs_inited);
     632             :     Assert(scan->rs_base.rs_parallel == NULL);
     633             : 
     634             :     /* When there are no pages to scan, return InvalidBlockNumber */
     635     1547898 :     if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
     636      758948 :         return InvalidBlockNumber;
     637             : 
     638      788950 :     if (ScanDirectionIsForward(dir))
     639             :     {
     640      788886 :         return scan->rs_startblock;
     641             :     }
     642             :     else
     643             :     {
     644             :         /*
     645             :          * Disable reporting to syncscan logic in a backwards scan; it's not
     646             :          * very likely anyone else is doing the same thing at the same time,
     647             :          * and much more likely that we'll just bollix things for forward
     648             :          * scanners.
     649             :          */
     650          64 :         scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
     651             : 
     652             :         /*
     653             :          * Start from last page of the scan.  Ensure we take into account
     654             :          * rs_numblocks if it's been adjusted by heap_setscanlimits().
     655             :          */
     656          64 :         if (scan->rs_numblocks != InvalidBlockNumber)
     657           6 :             return (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
     658             : 
     659          58 :         if (scan->rs_startblock > 0)
     660           0 :             return scan->rs_startblock - 1;
     661             : 
     662          58 :         return scan->rs_nblocks - 1;
     663             :     }
     664             : }
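
(Worked example for the backward-scan branch above, editor's note: with rs_nblocks = 8, rs_startblock = 5 and rs_numblocks = 6, the limited scan covers blocks 5, 6, 7, 0, 1, 2, so a backward scan starts at (5 + 6 - 1) % 8 = 2, the last block of that range.)
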
     665             : 
     666             : 
     667             : /*
     668             :  * heapgettup_start_page - helper function for heapgettup()
     669             :  *
     670             :  * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
     671             :  * to the number of tuples on this page.  Also set *lineoff to the first
     672             :  * offset to scan with forward scans getting the first offset and backward
     673             :  * getting the final offset on the page.
     674             :  */
     675             : static Page
     676      183854 : heapgettup_start_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
     677             :                       OffsetNumber *lineoff)
     678             : {
     679             :     Page        page;
     680             : 
     681             :     Assert(scan->rs_inited);
     682             :     Assert(BufferIsValid(scan->rs_cbuf));
     683             : 
     684             :     /* Caller is responsible for ensuring buffer is locked if needed */
     685      183854 :     page = BufferGetPage(scan->rs_cbuf);
     686             : 
     687      183854 :     *linesleft = PageGetMaxOffsetNumber(page) - FirstOffsetNumber + 1;
     688             : 
     689      183854 :     if (ScanDirectionIsForward(dir))
     690      183854 :         *lineoff = FirstOffsetNumber;
     691             :     else
     692           0 :         *lineoff = (OffsetNumber) (*linesleft);
     693             : 
     694             :     /* lineoff now references the physically previous or next tid */
     695      183854 :     return page;
     696             : }
     697             : 
     698             : 
     699             : /*
     700             :  * heapgettup_continue_page - helper function for heapgettup()
     701             :  *
     702             :  * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
     703             :  * to the number of tuples left to scan on this page.  Also set *lineoff to
     704             :  * the next offset to scan according to the ScanDirection in 'dir'.
     705             :  */
     706             : static inline Page
     707    15440772 : heapgettup_continue_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
     708             :                          OffsetNumber *lineoff)
     709             : {
     710             :     Page        page;
     711             : 
     712             :     Assert(scan->rs_inited);
     713             :     Assert(BufferIsValid(scan->rs_cbuf));
     714             : 
     715             :     /* Caller is responsible for ensuring buffer is locked if needed */
     716    15440772 :     page = BufferGetPage(scan->rs_cbuf);
     717             : 
     718    15440772 :     if (ScanDirectionIsForward(dir))
     719             :     {
     720    15440772 :         *lineoff = OffsetNumberNext(scan->rs_coffset);
     721    15440772 :         *linesleft = PageGetMaxOffsetNumber(page) - (*lineoff) + 1;
     722             :     }
     723             :     else
     724             :     {
     725             :         /*
     726             :          * The previous returned tuple may have been vacuumed since the
     727             :          * previous scan when we use a non-MVCC snapshot, so we must
     728             :          * re-establish the lineoff <= PageGetMaxOffsetNumber(page) invariant
     729             :          */
     730           0 :         *lineoff = Min(PageGetMaxOffsetNumber(page), OffsetNumberPrev(scan->rs_coffset));
     731           0 :         *linesleft = *lineoff;
     732             :     }
     733             : 
     734             :     /* lineoff now references the physically previous or next tid */
     735    15440772 :     return page;
     736             : }
     737             : 
     738             : /*
     739             :  * heapgettup_advance_block - helper for heap_fetch_next_buffer()
     740             :  *
     741             :  * Given the current block number, the scan direction, and various information
     742             :  * contained in the scan descriptor, calculate the BlockNumber to scan next
     743             :  * and return it.  If there are no further blocks to scan, return
     744             :  * InvalidBlockNumber to indicate this fact to the caller.
     745             :  *
     746             :  * This should not be called to determine the initial block number -- only for
     747             :  * subsequent blocks.
     748             :  *
     749             :  * This also adjusts rs_numblocks when a limit has been imposed by
     750             :  * heap_setscanlimits().
     751             :  */
     752             : static inline BlockNumber
     753     4543770 : heapgettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection dir)
     754             : {
     755             :     Assert(scan->rs_base.rs_parallel == NULL);
     756             : 
     757     4543770 :     if (likely(ScanDirectionIsForward(dir)))
     758             :     {
     759     4543652 :         block++;
     760             : 
     761             :         /* wrap back to the start of the heap */
     762     4543652 :         if (block >= scan->rs_nblocks)
     763      698248 :             block = 0;
     764             : 
     765             :         /*
     766             :          * Report our new scan position for synchronization purposes. We don't
     767             :          * do that when moving backwards, however. That would just mess up any
     768             :          * other forward-moving scanners.
     769             :          *
     770             :          * Note: we do this before checking for end of scan so that the final
     771             :          * state of the position hint is back at the start of the rel.  That's
     772             :          * not strictly necessary, but otherwise when you run the same query
     773             :          * multiple times the starting position would shift a little bit
     774             :          * backwards on every invocation, which is confusing. We don't
     775             :          * guarantee any specific ordering in general, though.
     776             :          */
     777     4543652 :         if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
     778       17574 :             ss_report_location(scan->rs_base.rs_rd, block);
     779             : 
     780             :         /* we're done if we're back at where we started */
     781     4543652 :         if (block == scan->rs_startblock)
     782      698166 :             return InvalidBlockNumber;
     783             : 
     784             :         /* check if the limit imposed by heap_setscanlimits() is met */
     785     3845486 :         if (scan->rs_numblocks != InvalidBlockNumber)
     786             :         {
     787        3180 :             if (--scan->rs_numblocks == 0)
     788        3052 :                 return InvalidBlockNumber;
     789             :         }
     790             : 
     791     3842434 :         return block;
     792             :     }
     793             :     else
     794             :     {
     795             :         /* we're done if the last block is the start position */
     796         118 :         if (block == scan->rs_startblock)
     797         118 :             return InvalidBlockNumber;
     798             : 
     799             :         /* check if the limit imposed by heap_setscanlimits() is met */
     800           0 :         if (scan->rs_numblocks != InvalidBlockNumber)
     801             :         {
     802           0 :             if (--scan->rs_numblocks == 0)
     803           0 :                 return InvalidBlockNumber;
     804             :         }
     805             : 
     806             :         /* wrap to the end of the heap when the last page was page 0 */
     807           0 :         if (block == 0)
     808           0 :             block = scan->rs_nblocks;
     809             : 
     810           0 :         block--;
     811             : 
     812           0 :         return block;
     813             :     }
     814             : }
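
(Worked example for the forward branch above, editor's note: with rs_nblocks = 5 and rs_startblock = 3, blocks are visited in the order 3, 4, 0, 1, 2; advancing past block 2 wraps around to 3, which equals rs_startblock, so InvalidBlockNumber is returned and the scan ends.)
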
     815             : 
     816             : /* ----------------
     817             :  *      heapgettup - fetch next heap tuple
     818             :  *
     819             :  *      Initialize the scan if not already done; then advance to the next
     820             :  *      tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
     821             :  *      or set scan->rs_ctup.t_data = NULL if no more tuples.
     822             :  *
     823             :  * Note: the reason nkeys/key are passed separately, even though they are
     824             :  * kept in the scan descriptor, is that the caller may not want us to check
     825             :  * the scankeys.
     826             :  *
     827             :  * Note: when we fall off the end of the scan in either direction, we
     828             :  * reset rs_inited.  This means that a further request with the same
     829             :  * scan direction will restart the scan, which is a bit odd, but a
     830             :  * request with the opposite scan direction will start a fresh scan
     831             :  * in the proper direction.  The latter is required behavior for cursors,
     832             :  * while the former case is generally undefined behavior in Postgres
     833             :  * so we don't care too much.
     834             :  * ----------------
     835             :  */
     836             : static void
     837    15479290 : heapgettup(HeapScanDesc scan,
     838             :            ScanDirection dir,
     839             :            int nkeys,
     840             :            ScanKey key)
     841             : {
     842    15479290 :     HeapTuple   tuple = &(scan->rs_ctup);
     843             :     Page        page;
     844             :     OffsetNumber lineoff;
     845             :     int         linesleft;
     846             : 
     847    15479290 :     if (likely(scan->rs_inited))
     848             :     {
     849             :         /* continue from previously returned page/tuple */
     850    15440772 :         LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
     851    15440772 :         page = heapgettup_continue_page(scan, dir, &linesleft, &lineoff);
     852    15440772 :         goto continue_page;
     853             :     }
     854             : 
     855             :     /*
     856             :      * advance the scan until we find a qualifying tuple or run out of stuff
     857             :      * to scan
     858             :      */
     859             :     while (true)
     860             :     {
     861      222074 :         heap_fetch_next_buffer(scan, dir);
     862             : 
     863             :         /* did we run out of blocks to scan? */
     864      222074 :         if (!BufferIsValid(scan->rs_cbuf))
     865       38220 :             break;
     866             : 
     867             :         Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);
     868             : 
     869      183854 :         LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
     870      183854 :         page = heapgettup_start_page(scan, dir, &linesleft, &lineoff);
     871    15624626 : continue_page:
     872             : 
     873             :         /*
     874             :          * Only continue scanning the page while we have lines left.
     875             :          *
     876             :          * Note that this protects us from accessing line pointers past
     877             :          * PageGetMaxOffsetNumber(); both for forward scans when we resume the
     878             :          * table scan, and for when we start scanning a new page.
     879             :          */
     880    15713960 :         for (; linesleft > 0; linesleft--, lineoff += dir)
     881             :         {
     882             :             bool        visible;
     883    15530404 :             ItemId      lpp = PageGetItemId(page, lineoff);
     884             : 
     885    15530404 :             if (!ItemIdIsNormal(lpp))
     886       78968 :                 continue;
     887             : 
     888    15451436 :             tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
     889    15451436 :             tuple->t_len = ItemIdGetLength(lpp);
     890    15451436 :             ItemPointerSet(&(tuple->t_self), scan->rs_cblock, lineoff);
     891             : 
     892    15451436 :             visible = HeapTupleSatisfiesVisibility(tuple,
     893             :                                                    scan->rs_base.rs_snapshot,
     894             :                                                    scan->rs_cbuf);
     895             : 
     896    15451436 :             HeapCheckForSerializableConflictOut(visible, scan->rs_base.rs_rd,
     897             :                                                 tuple, scan->rs_cbuf,
     898             :                                                 scan->rs_base.rs_snapshot);
     899             : 
     900             :             /* skip tuples not visible to this snapshot */
     901    15451436 :             if (!visible)
     902       10366 :                 continue;
     903             : 
     904             :             /* skip any tuples that don't match the scan key */
     905    15441070 :             if (key != NULL &&
     906           0 :                 !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
     907             :                              nkeys, key))
     908           0 :                 continue;
     909             : 
     910    15441070 :             LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
     911    15441070 :             scan->rs_coffset = lineoff;
     912    15441070 :             return;
     913             :         }
     914             : 
     915             :         /*
     916             :          * if we get here, it means we've exhausted the items on this page and
     917             :          * it's time to move to the next.
     918             :          */
     919      183556 :         LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
     920             :     }
     921             : 
     922             :     /* end of scan */
     923       38220 :     if (BufferIsValid(scan->rs_cbuf))
     924           0 :         ReleaseBuffer(scan->rs_cbuf);
     925             : 
     926       38220 :     scan->rs_cbuf = InvalidBuffer;
     927       38220 :     scan->rs_cblock = InvalidBlockNumber;
     928       38220 :     scan->rs_prefetch_block = InvalidBlockNumber;
     929       38220 :     tuple->t_data = NULL;
     930       38220 :     scan->rs_inited = false;
     931             : }
     932             : 
     933             : /* ----------------
     934             :  *      heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
     935             :  *
     936             :  *      Same API as heapgettup, but used in page-at-a-time mode
     937             :  *
     938             :  * The internal logic is much the same as heapgettup's too, but there are some
     939             :  * differences: we do not take the buffer content lock (that only needs to
     940             :  * happen inside heap_prepare_pagescan), and we iterate through just the
     941             :  * tuples listed in rs_vistuples[] rather than all tuples on the page.  Notice
      942             :  * that lineindex is 0-based, whereas the corresponding loop variable lineoff in
     943             :  * heapgettup is 1-based.
     944             :  * ----------------
     945             :  */
     946             : static void
     947    74302040 : heapgettup_pagemode(HeapScanDesc scan,
     948             :                     ScanDirection dir,
     949             :                     int nkeys,
     950             :                     ScanKey key)
     951             : {
     952    74302040 :     HeapTuple   tuple = &(scan->rs_ctup);
     953             :     Page        page;
     954             :     int         lineindex;
     955             :     int         linesleft;
     956             : 
     957    74302040 :     if (likely(scan->rs_inited))
     958             :     {
     959             :         /* continue from previously returned page/tuple */
     960    72789890 :         page = BufferGetPage(scan->rs_cbuf);
     961             : 
     962    72789890 :         lineindex = scan->rs_cindex + dir;
     963    72789890 :         if (ScanDirectionIsForward(dir))
     964    72789232 :             linesleft = scan->rs_ntuples - lineindex;
     965             :         else
     966         658 :             linesleft = scan->rs_cindex;
     967             :         /* lineindex now references the next or previous visible tid */
     968             : 
     969    72789890 :         goto continue_page;
     970             :     }
     971             : 
     972             :     /*
     973             :      * advance the scan until we find a qualifying tuple or run out of stuff
     974             :      * to scan
     975             :      */
     976             :     while (true)
     977             :     {
     978     5120880 :         heap_fetch_next_buffer(scan, dir);
     979             : 
     980             :         /* did we run out of blocks to scan? */
     981     5120870 :         if (!BufferIsValid(scan->rs_cbuf))
     982     1268344 :             break;
     983             : 
     984             :         Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);
     985             : 
     986             :         /* prune the page and determine visible tuple offsets */
     987     3852526 :         heap_prepare_pagescan((TableScanDesc) scan);
     988     3852510 :         page = BufferGetPage(scan->rs_cbuf);
     989     3852510 :         linesleft = scan->rs_ntuples;
     990     3852510 :         lineindex = ScanDirectionIsForward(dir) ? 0 : linesleft - 1;
     991             : 
     992             :         /* lineindex now references the next or previous visible tid */
     993    76642400 : continue_page:
     994             : 
     995   144739672 :         for (; linesleft > 0; linesleft--, lineindex += dir)
     996             :         {
     997             :             ItemId      lpp;
     998             :             OffsetNumber lineoff;
     999             : 
    1000   141130942 :             lineoff = scan->rs_vistuples[lineindex];
    1001   141130942 :             lpp = PageGetItemId(page, lineoff);
    1002             :             Assert(ItemIdIsNormal(lpp));
    1003             : 
    1004   141130942 :             tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
    1005   141130942 :             tuple->t_len = ItemIdGetLength(lpp);
    1006   141130942 :             ItemPointerSet(&(tuple->t_self), scan->rs_cblock, lineoff);
    1007             : 
    1008             :             /* skip any tuples that don't match the scan key */
    1009   141130942 :             if (key != NULL &&
    1010    68584944 :                 !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
    1011             :                              nkeys, key))
    1012    68097272 :                 continue;
    1013             : 
    1014    73033670 :             scan->rs_cindex = lineindex;
    1015    73033670 :             return;
    1016             :         }
    1017             :     }
    1018             : 
    1019             :     /* end of scan */
    1020     1268344 :     if (BufferIsValid(scan->rs_cbuf))
    1021           0 :         ReleaseBuffer(scan->rs_cbuf);
    1022     1268344 :     scan->rs_cbuf = InvalidBuffer;
    1023     1268344 :     scan->rs_cblock = InvalidBlockNumber;
    1024     1268344 :     scan->rs_prefetch_block = InvalidBlockNumber;
    1025     1268344 :     tuple->t_data = NULL;
    1026     1268344 :     scan->rs_inited = false;
    1027             : }
    1028             : 
    1029             : 
    1030             : /* ----------------------------------------------------------------
    1031             :  *                   heap access method interface
    1032             :  * ----------------------------------------------------------------
    1033             :  */
    1034             : 
    1035             : 
    1036             : TableScanDesc
    1037      607140 : heap_beginscan(Relation relation, Snapshot snapshot,
    1038             :                int nkeys, ScanKey key,
    1039             :                ParallelTableScanDesc parallel_scan,
    1040             :                uint32 flags)
    1041             : {
    1042             :     HeapScanDesc scan;
    1043             : 
    1044             :     /*
    1045             :      * increment relation ref count while scanning relation
    1046             :      *
    1047             :      * This is just to make really sure the relcache entry won't go away while
    1048             :      * the scan has a pointer to it.  Caller should be holding the rel open
    1049             :      * anyway, so this is redundant in all normal scenarios...
    1050             :      */
    1051      607140 :     RelationIncrementReferenceCount(relation);
    1052             : 
    1053             :     /*
    1054             :      * allocate and initialize scan descriptor
    1055             :      */
    1056      607140 :     scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
    1057             : 
    1058      607140 :     scan->rs_base.rs_rd = relation;
    1059      607140 :     scan->rs_base.rs_snapshot = snapshot;
    1060      607140 :     scan->rs_base.rs_nkeys = nkeys;
    1061      607140 :     scan->rs_base.rs_flags = flags;
    1062      607140 :     scan->rs_base.rs_parallel = parallel_scan;
    1063      607140 :     scan->rs_strategy = NULL;    /* set in initscan */
    1064      607140 :     scan->rs_vmbuffer = InvalidBuffer;
    1065      607140 :     scan->rs_empty_tuples_pending = 0;
    1066             : 
    1067             :     /*
    1068             :      * Disable page-at-a-time mode if it's not an MVCC-safe snapshot.
    1069             :      */
    1070      607140 :     if (!(snapshot && IsMVCCSnapshot(snapshot)))
    1071       51906 :         scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
    1072             : 
    1073             :     /*
    1074             :      * For seqscan and sample scans in a serializable transaction, acquire a
    1075             :      * predicate lock on the entire relation. This is required not only to
    1076             :      * lock all the matching tuples, but also to conflict with new insertions
    1077             :      * into the table. In an indexscan, we take page locks on the index pages
    1078             :      * covering the range specified in the scan qual, but in a heap scan there
    1079             :      * is nothing more fine-grained to lock. A bitmap scan is a different
    1080             :      * story; there we have already scanned the index and locked the index
    1081             :      * pages covering the predicate. But in that case we still have to lock
    1082             :      * any matching heap tuples. For sample scan we could optimize the locking
    1083             :      * to be at least page-level granularity, but we'd need to add per-tuple
    1084             :      * locking for that.
    1085             :      */
    1086      607140 :     if (scan->rs_base.rs_flags & (SO_TYPE_SEQSCAN | SO_TYPE_SAMPLESCAN))
    1087             :     {
    1088             :         /*
    1089             :          * Ensure a missing snapshot is noticed reliably, even if the
    1090             :          * isolation mode means predicate locking isn't performed (and
    1091             :          * therefore the snapshot isn't used here).
    1092             :          */
    1093             :         Assert(snapshot);
    1094      576198 :         PredicateLockRelation(relation, snapshot);
    1095             :     }
    1096             : 
    1097             :     /* we only need to set this up once */
    1098      607140 :     scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
    1099             : 
    1100             :     /*
    1101             :      * Allocate memory to keep track of page allocation for parallel workers
    1102             :      * when doing a parallel scan.
    1103             :      */
    1104      607140 :     if (parallel_scan != NULL)
    1105        3856 :         scan->rs_parallelworkerdata = palloc(sizeof(ParallelBlockTableScanWorkerData));
    1106             :     else
    1107      603284 :         scan->rs_parallelworkerdata = NULL;
    1108             : 
    1109             :     /*
    1110             :      * we do this here instead of in initscan() because heap_rescan also calls
    1111             :      * initscan() and we don't want to allocate memory again
    1112             :      */
    1113      607140 :     if (nkeys > 0)
    1114      339994 :         scan->rs_base.rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
    1115             :     else
    1116      267146 :         scan->rs_base.rs_key = NULL;
    1117             : 
    1118      607140 :     initscan(scan, key, false);
    1119             : 
    1120      607136 :     scan->rs_read_stream = NULL;
    1121             : 
    1122             :     /*
    1123             :      * Set up a read stream for sequential scans and TID range scans. This
    1124             :      * should be done after initscan() because initscan() allocates the
    1125             :      * BufferAccessStrategy object passed to the streaming read API.
    1126             :      */
    1127      607136 :     if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN ||
    1128       31088 :         scan->rs_base.rs_flags & SO_TYPE_TIDRANGESCAN)
    1129             :     {
    1130             :         ReadStreamBlockNumberCB cb;
    1131             : 
    1132      576160 :         if (scan->rs_base.rs_parallel)
    1133        3856 :             cb = heap_scan_stream_read_next_parallel;
    1134             :         else
    1135      572304 :             cb = heap_scan_stream_read_next_serial;
    1136             : 
    1137      576160 :         scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_SEQUENTIAL,
    1138             :                                                           scan->rs_strategy,
    1139             :                                                           scan->rs_base.rs_rd,
    1140             :                                                           MAIN_FORKNUM,
    1141             :                                                           cb,
    1142             :                                                           scan,
    1143             :                                                           0);
    1144             :     }
    1145             : 
    1146             : 
    1147      607136 :     return (TableScanDesc) scan;
    1148             : }
    1149             : 
    1150             : void
    1151      980728 : heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
    1152             :             bool allow_strat, bool allow_sync, bool allow_pagemode)
    1153             : {
    1154      980728 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1155             : 
    1156      980728 :     if (set_params)
    1157             :     {
    1158          30 :         if (allow_strat)
    1159          30 :             scan->rs_base.rs_flags |= SO_ALLOW_STRAT;
    1160             :         else
    1161           0 :             scan->rs_base.rs_flags &= ~SO_ALLOW_STRAT;
    1162             : 
    1163          30 :         if (allow_sync)
    1164          12 :             scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
    1165             :         else
    1166          18 :             scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
    1167             : 
    1168          30 :         if (allow_pagemode && scan->rs_base.rs_snapshot &&
    1169          30 :             IsMVCCSnapshot(scan->rs_base.rs_snapshot))
    1170          30 :             scan->rs_base.rs_flags |= SO_ALLOW_PAGEMODE;
    1171             :         else
    1172           0 :             scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
    1173             :     }
    1174             : 
    1175             :     /*
    1176             :      * unpin scan buffers
    1177             :      */
    1178      980728 :     if (BufferIsValid(scan->rs_cbuf))
    1179        5490 :         ReleaseBuffer(scan->rs_cbuf);
    1180             : 
    1181      980728 :     if (BufferIsValid(scan->rs_vmbuffer))
    1182             :     {
    1183          54 :         ReleaseBuffer(scan->rs_vmbuffer);
    1184          54 :         scan->rs_vmbuffer = InvalidBuffer;
    1185             :     }
    1186             : 
    1187             :     Assert(scan->rs_empty_tuples_pending == 0);
    1188             : 
    1189             :     /*
    1190             :      * The read stream is reset on rescan. This must be done before
    1191             :      * initscan(), as some state referred to by read_stream_reset() is reset
    1192             :      * in initscan().
    1193             :      */
    1194      980728 :     if (scan->rs_read_stream)
    1195      976706 :         read_stream_reset(scan->rs_read_stream);
    1196             : 
    1197             :     /*
    1198             :      * reinitialize scan descriptor
    1199             :      */
    1200      980728 :     initscan(scan, key, true);
    1201      980728 : }
    1202             : 
    1203             : void
    1204      604614 : heap_endscan(TableScanDesc sscan)
    1205             : {
    1206      604614 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1207             : 
    1208             :     /* Note: no locking manipulations needed */
    1209             : 
    1210             :     /*
    1211             :      * unpin scan buffers
    1212             :      */
    1213      604614 :     if (BufferIsValid(scan->rs_cbuf))
    1214      251924 :         ReleaseBuffer(scan->rs_cbuf);
    1215             : 
    1216      604614 :     if (BufferIsValid(scan->rs_vmbuffer))
    1217          30 :         ReleaseBuffer(scan->rs_vmbuffer);
    1218             : 
    1219             :     Assert(scan->rs_empty_tuples_pending == 0);
    1220             : 
    1221             :     /*
    1222             :      * Must free the read stream before freeing the BufferAccessStrategy.
    1223             :      */
    1224      604614 :     if (scan->rs_read_stream)
    1225      573818 :         read_stream_end(scan->rs_read_stream);
    1226             : 
    1227             :     /*
    1228             :      * decrement relation reference count and free scan descriptor storage
    1229             :      */
    1230      604614 :     RelationDecrementReferenceCount(scan->rs_base.rs_rd);
    1231             : 
    1232      604614 :     if (scan->rs_base.rs_key)
    1233      339930 :         pfree(scan->rs_base.rs_key);
    1234             : 
    1235      604614 :     if (scan->rs_strategy != NULL)
    1236       18572 :         FreeAccessStrategy(scan->rs_strategy);
    1237             : 
    1238      604614 :     if (scan->rs_parallelworkerdata != NULL)
    1239        3856 :         pfree(scan->rs_parallelworkerdata);
    1240             : 
    1241      604614 :     if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
    1242       82264 :         UnregisterSnapshot(scan->rs_base.rs_snapshot);
    1243             : 
    1244      604614 :     pfree(scan);
    1245      604614 : }
    1246             : 
    1247             : HeapTuple
    1248    16954596 : heap_getnext(TableScanDesc sscan, ScanDirection direction)
    1249             : {
    1250    16954596 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1251             : 
    1252             :     /*
    1253             :      * This is still widely used directly, without going through table AM, so
    1254             :      * add a safety check.  It's possible we should, at a later point,
    1255             :      * downgrade this to an assert. The reason for checking the AM routine,
    1256             :      * rather than the AM oid, is that this allows writing regression tests
    1257             :      * that create another AM reusing the heap handler.
    1258             :      */
    1259    16954596 :     if (unlikely(sscan->rs_rd->rd_tableam != GetHeapamTableAmRoutine()))
    1260           0 :         ereport(ERROR,
    1261             :                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    1262             :                  errmsg_internal("only heap AM is supported")));
    1263             : 
    1264             :     /*
    1265             :      * We don't expect direct calls to heap_getnext with valid CheckXidAlive
    1266             :      * for catalog or regular tables.  See detailed comments in xact.c where
    1267             :      * these variables are declared.  Normally we have such a check at the
    1268             :      * tableam API level, but this is called from many places, so we need to
    1269             :      * ensure it here.
    1270             :      */
    1271    16954596 :     if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
    1272           0 :         elog(ERROR, "unexpected heap_getnext call during logical decoding");
    1273             : 
    1274             :     /* Note: no locking manipulations needed */
    1275             : 
    1276    16954596 :     if (scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
    1277     2443964 :         heapgettup_pagemode(scan, direction,
    1278     2443964 :                             scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
    1279             :     else
    1280    14510632 :         heapgettup(scan, direction,
    1281    14510632 :                    scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
    1282             : 
    1283    16954596 :     if (scan->rs_ctup.t_data == NULL)
    1284      103554 :         return NULL;
    1285             : 
    1286             :     /*
    1287             :      * if we get here it means we have a new current scan tuple, so point to
    1288             :      * the proper return buffer and return the tuple.
    1289             :      */
    1290             : 
    1291    16851042 :     pgstat_count_heap_getnext(scan->rs_base.rs_rd);
    1292             : 
    1293    16851042 :     return &scan->rs_ctup;
    1294             : }
    1295             : 
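
             A minimal direct caller of the scan interface above might look like the
             following sketch.  This is illustrative only and not part of heapam.c: it
             assumes backend code running inside a transaction with an active snapshot,
             and the helper name scan_all_tuples is invented for the example.

             #include "postgres.h"

             #include "access/heapam.h"
             #include "access/table.h"
             #include "access/tableam.h"
             #include "utils/snapmgr.h"

             /* hypothetical helper, not part of heapam.c */
             static void
             scan_all_tuples(Oid relid)
             {
                 Relation    rel = table_open(relid, AccessShareLock);
                 TableScanDesc scan;
                 HeapTuple   tuple;

                 /* same flags table_beginscan() would pass for a plain seqscan */
                 scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL, NULL,
                                       SO_TYPE_SEQSCAN | SO_ALLOW_STRAT |
                                       SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE);

                 while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
                 {
                     /* tuple points at scan-local storage; just inspect it here */
                 }

                 heap_endscan(scan);
                 table_close(rel, AccessShareLock);
             }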
    1296             : bool
    1297    72820608 : heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
    1298             : {
    1299    72820608 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1300             : 
    1301             :     /* Note: no locking manipulations needed */
    1302             : 
    1303    72820608 :     if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
    1304    71851950 :         heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
    1305             :     else
    1306      968658 :         heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
    1307             : 
    1308    72820582 :     if (scan->rs_ctup.t_data == NULL)
    1309             :     {
    1310     1202916 :         ExecClearTuple(slot);
    1311     1202916 :         return false;
    1312             :     }
    1313             : 
    1314             :     /*
    1315             :      * if we get here it means we have a new current scan tuple, so point to
    1316             :      * the proper return buffer and return the tuple.
    1317             :      */
    1318             : 
    1319    71617666 :     pgstat_count_heap_getnext(scan->rs_base.rs_rd);
    1320             : 
    1321    71617666 :     ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
    1322             :                              scan->rs_cbuf);
    1323    71617666 :     return true;
    1324             : }
    1325             : 
    1326             : void
    1327         178 : heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
    1328             :                   ItemPointer maxtid)
    1329             : {
    1330         178 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1331             :     BlockNumber startBlk;
    1332             :     BlockNumber numBlks;
    1333             :     ItemPointerData highestItem;
    1334             :     ItemPointerData lowestItem;
    1335             : 
    1336             :     /*
    1337             :      * For relations without any pages, we can simply leave the TID range
    1338             :      * unset.  There will be no tuples to scan, therefore no tuples outside
    1339             :      * the given TID range.
    1340             :      */
    1341         178 :     if (scan->rs_nblocks == 0)
    1342          48 :         return;
    1343             : 
    1344             :     /*
    1345             :      * Set up some ItemPointers which point to the first and last possible
    1346             :      * tuples in the heap.
    1347             :      */
    1348         166 :     ItemPointerSet(&highestItem, scan->rs_nblocks - 1, MaxOffsetNumber);
    1349         166 :     ItemPointerSet(&lowestItem, 0, FirstOffsetNumber);
    1350             : 
    1351             :     /*
    1352             :      * If the given maximum TID is below the highest possible TID in the
    1353             :      * relation, then restrict the range to that, otherwise we scan to the end
    1354             :      * of the relation.
    1355             :      */
    1356         166 :     if (ItemPointerCompare(maxtid, &highestItem) < 0)
    1357         132 :         ItemPointerCopy(maxtid, &highestItem);
    1358             : 
    1359             :     /*
    1360             :      * If the given minimum TID is above the lowest possible TID in the
    1361             :      * relation, then restrict the range to only scan for TIDs above that.
    1362             :      */
    1363         166 :     if (ItemPointerCompare(mintid, &lowestItem) > 0)
    1364          52 :         ItemPointerCopy(mintid, &lowestItem);
    1365             : 
    1366             :     /*
    1367             :      * Check for an empty range and protect against would-be negative results
    1368             :      * from the numBlks calculation below.
    1369             :      */
    1370         166 :     if (ItemPointerCompare(&highestItem, &lowestItem) < 0)
    1371             :     {
    1372             :         /* Set an empty range of blocks to scan */
    1373          36 :         heap_setscanlimits(sscan, 0, 0);
    1374          36 :         return;
    1375             :     }
    1376             : 
    1377             :     /*
    1378             :      * Calculate the first block and the number of blocks we must scan. We
    1379             :      * could be more aggressive here and perform some more validation to try to
    1380             :      * further narrow the scope of blocks to scan by checking if the
    1381             :      * lowestItem has an offset above MaxOffsetNumber.  In this case, we could
    1382             :      * advance startBlk by one.  Likewise, if highestItem has an offset of 0
    1383             :      * we could scan one fewer block.  However, such an optimization does not
    1384             :      * seem worth troubling over, currently.
    1385             :      */
    1386         130 :     startBlk = ItemPointerGetBlockNumberNoCheck(&lowestItem);
    1387             : 
    1388         130 :     numBlks = ItemPointerGetBlockNumberNoCheck(&highestItem) -
    1389         130 :         ItemPointerGetBlockNumberNoCheck(&lowestItem) + 1;
    1390             : 
    1391             :     /* Set the start block and number of blocks to scan */
    1392         130 :     heap_setscanlimits(sscan, startBlk, numBlks);
    1393             : 
    1394             :     /* Finally, set the TID range in sscan */
    1395         130 :     ItemPointerCopy(&lowestItem, &sscan->rs_mintid);
    1396         130 :     ItemPointerCopy(&highestItem, &sscan->rs_maxtid);
    1397             : }
    1398             : 
    1399             : bool
    1400        5940 : heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
    1401             :                           TupleTableSlot *slot)
    1402             : {
    1403        5940 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1404        5940 :     ItemPointer mintid = &sscan->rs_mintid;
    1405        5940 :     ItemPointer maxtid = &sscan->rs_maxtid;
    1406             : 
    1407             :     /* Note: no locking manipulations needed */
    1408             :     for (;;)
    1409             :     {
    1410        6126 :         if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
    1411        6126 :             heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
    1412             :         else
    1413           0 :             heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
    1414             : 
    1415        6126 :         if (scan->rs_ctup.t_data == NULL)
    1416             :         {
    1417          94 :             ExecClearTuple(slot);
    1418          94 :             return false;
    1419             :         }
    1420             : 
    1421             :         /*
    1422             :          * heap_set_tidrange will have used heap_setscanlimits to limit the
    1423             :          * range of pages we scan to only ones that can contain the TID range
    1424             :          * we're scanning for.  Here we must filter out any tuples from these
    1425             :          * pages that are outside of that range.
    1426             :          */
    1427        6032 :         if (ItemPointerCompare(&scan->rs_ctup.t_self, mintid) < 0)
    1428             :         {
    1429         186 :             ExecClearTuple(slot);
    1430             : 
    1431             :             /*
    1432             :              * When scanning backwards, the TIDs will be in descending order.
    1433             :              * Future tuples in this direction will be lower still, so we can
    1434             :              * just return false to indicate there will be no more tuples.
    1435             :              */
    1436         186 :             if (ScanDirectionIsBackward(direction))
    1437           0 :                 return false;
    1438             : 
    1439         186 :             continue;
    1440             :         }
    1441             : 
    1442             :         /*
    1443             :          * Likewise for the final page, we must filter out TIDs greater than
    1444             :          * maxtid.
    1445             :          */
    1446        5846 :         if (ItemPointerCompare(&scan->rs_ctup.t_self, maxtid) > 0)
    1447             :         {
    1448          72 :             ExecClearTuple(slot);
    1449             : 
    1450             :             /*
    1451             :              * When scanning forward, the TIDs will be in ascending order.
    1452             :              * Future tuples in this direction will be higher still, so we can
    1453             :              * just return false to indicate there will be no more tuples.
    1454             :              */
    1455          72 :             if (ScanDirectionIsForward(direction))
    1456          72 :                 return false;
    1457           0 :             continue;
    1458             :         }
    1459             : 
    1460        5774 :         break;
    1461             :     }
    1462             : 
    1463             :     /*
    1464             :      * if we get here it means we have a new current scan tuple, so point to
    1465             :      * the proper return buffer and return the tuple.
    1466             :      */
    1467        5774 :     pgstat_count_heap_getnext(scan->rs_base.rs_rd);
    1468             : 
    1469        5774 :     ExecStoreBufferHeapTuple(&scan->rs_ctup, slot, scan->rs_cbuf);
    1470        5774 :     return true;
    1471             : }
    1472             : 
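
             To see how heap_set_tidrange() and heap_getnextslot_tidrange() fit
             together, here is a rough caller sketch that restricts a scan to the first
             ten heap blocks.  Illustrative only: normally the executor drives this
             through the table AM wrappers, an active snapshot is assumed, and the
             helper name is invented.

             #include "postgres.h"

             #include "access/heapam.h"
             #include "access/tableam.h"
             #include "executor/tuptable.h"
             #include "utils/rel.h"
             #include "utils/snapmgr.h"

             /* hypothetical helper, not part of heapam.c */
             static void
             scan_first_ten_blocks(Relation rel)
             {
                 TableScanDesc scan;
                 TupleTableSlot *slot;
                 ItemPointerData mintid;
                 ItemPointerData maxtid;

                 ItemPointerSet(&mintid, 0, FirstOffsetNumber);
                 ItemPointerSet(&maxtid, 9, MaxOffsetNumber);

                 /* same flags table_beginscan_tidrange() would use */
                 scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL, NULL,
                                       SO_TYPE_TIDRANGESCAN | SO_ALLOW_PAGEMODE);
                 heap_set_tidrange(scan, &mintid, &maxtid);

                 slot = MakeSingleTupleTableSlot(RelationGetDescr(rel),
                                                 &TTSOpsBufferHeapTuple);

                 while (heap_getnextslot_tidrange(scan, ForwardScanDirection, slot))
                 {
                     /* slot holds a tuple whose TID lies within [mintid, maxtid] */
                 }

                 ExecDropSingleTupleTableSlot(slot);
                 heap_endscan(scan);
             }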
    1473             : /*
    1474             :  *  heap_fetch      - retrieve tuple with given tid
    1475             :  *
    1476             :  * On entry, tuple->t_self is the TID to fetch.  We pin the buffer holding
    1477             :  * the tuple, fill in the remaining fields of *tuple, and check the tuple
    1478             :  * against the specified snapshot.
    1479             :  *
    1480             :  * If successful (tuple found and passes snapshot time qual), then *userbuf
    1481             :  * is set to the buffer holding the tuple and true is returned.  The caller
    1482             :  * must unpin the buffer when done with the tuple.
    1483             :  *
    1484             :  * If the tuple is not found (ie, item number references a deleted slot),
    1485             :  * then tuple->t_data is set to NULL, *userbuf is set to InvalidBuffer,
    1486             :  * and false is returned.
    1487             :  *
    1488             :  * If the tuple is found but fails the time qual check, then the behavior
    1489             :  * depends on the keep_buf parameter.  If keep_buf is false, the results
    1490             :  * are the same as for the tuple-not-found case.  If keep_buf is true,
    1491             :  * then tuple->t_data and *userbuf are returned as for the success case,
    1492             :  * and again the caller must unpin the buffer; but false is returned.
    1493             :  *
    1494             :  * heap_fetch does not follow HOT chains: only the exact TID requested will
    1495             :  * be fetched.
    1496             :  *
    1497             :  * It is somewhat inconsistent that we ereport() on invalid block number but
    1498             :  * return false on invalid item number.  There are a couple of reasons though.
    1499             :  * One is that the caller can relatively easily check the block number for
    1500             :  * validity, but cannot check the item number without reading the page
    1501             :  * himself.  Another is that when we are following a t_ctid link, we can be
    1502             :  * reasonably confident that the page number is valid (since VACUUM shouldn't
    1503             :  * truncate off the destination page without having killed the referencing
    1504             :  * tuple first), but the item number might well not be good.
    1505             :  */
    1506             : bool
    1507      614746 : heap_fetch(Relation relation,
    1508             :            Snapshot snapshot,
    1509             :            HeapTuple tuple,
    1510             :            Buffer *userbuf,
    1511             :            bool keep_buf)
    1512             : {
    1513      614746 :     ItemPointer tid = &(tuple->t_self);
    1514             :     ItemId      lp;
    1515             :     Buffer      buffer;
    1516             :     Page        page;
    1517             :     OffsetNumber offnum;
    1518             :     bool        valid;
    1519             : 
    1520             :     /*
    1521             :      * Fetch and pin the appropriate page of the relation.
    1522             :      */
    1523      614746 :     buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
    1524             : 
    1525             :     /*
    1526             :      * Need share lock on buffer to examine tuple commit status.
    1527             :      */
    1528      614746 :     LockBuffer(buffer, BUFFER_LOCK_SHARE);
    1529      614746 :     page = BufferGetPage(buffer);
    1530             : 
    1531             :     /*
    1532             :      * We'd better check for an out-of-range offnum in case a VACUUM has run
    1533             :      * since the TID was obtained.
    1534             :      */
    1535      614746 :     offnum = ItemPointerGetOffsetNumber(tid);
    1536      614746 :     if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
    1537             :     {
    1538           0 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    1539           0 :         ReleaseBuffer(buffer);
    1540           0 :         *userbuf = InvalidBuffer;
    1541           0 :         tuple->t_data = NULL;
    1542           0 :         return false;
    1543             :     }
    1544             : 
    1545             :     /*
    1546             :      * get the item line pointer corresponding to the requested tid
    1547             :      */
    1548      614746 :     lp = PageGetItemId(page, offnum);
    1549             : 
    1550             :     /*
    1551             :      * Must check for deleted tuple.
    1552             :      */
    1553      614746 :     if (!ItemIdIsNormal(lp))
    1554             :     {
    1555        3714 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    1556        3714 :         ReleaseBuffer(buffer);
    1557        3714 :         *userbuf = InvalidBuffer;
    1558        3714 :         tuple->t_data = NULL;
    1559        3714 :         return false;
    1560             :     }
    1561             : 
    1562             :     /*
    1563             :      * fill in *tuple fields
    1564             :      */
    1565      611032 :     tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
    1566      611032 :     tuple->t_len = ItemIdGetLength(lp);
    1567      611032 :     tuple->t_tableOid = RelationGetRelid(relation);
    1568             : 
    1569             :     /*
    1570             :      * check tuple visibility, then release lock
    1571             :      */
    1572      611032 :     valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
    1573             : 
    1574      611032 :     if (valid)
    1575      610916 :         PredicateLockTID(relation, &(tuple->t_self), snapshot,
    1576      610916 :                          HeapTupleHeaderGetXmin(tuple->t_data));
    1577             : 
    1578      611032 :     HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
    1579             : 
    1580      611032 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    1581             : 
    1582      611032 :     if (valid)
    1583             :     {
    1584             :         /*
    1585             :          * All checks passed, so return the tuple as valid. Caller is now
    1586             :          * responsible for releasing the buffer.
    1587             :          */
    1588      610916 :         *userbuf = buffer;
    1589             : 
    1590      610916 :         return true;
    1591             :     }
    1592             : 
    1593             :     /* Tuple failed time qual, but maybe caller wants to see it anyway. */
    1594         116 :     if (keep_buf)
    1595          58 :         *userbuf = buffer;
    1596             :     else
    1597             :     {
    1598          58 :         ReleaseBuffer(buffer);
    1599          58 :         *userbuf = InvalidBuffer;
    1600          58 :         tuple->t_data = NULL;
    1601             :     }
    1602             : 
    1603         116 :     return false;
    1604             : }
    1605             : 
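
             The heap_fetch() contract described above can be summarized with a small
             caller sketch.  Illustrative only: the helper name is invented and an
             active snapshot is assumed.

             #include "postgres.h"

             #include "access/heapam.h"
             #include "storage/bufmgr.h"
             #include "utils/snapmgr.h"

             /* hypothetical helper, not part of heapam.c */
             static bool
             fetch_one_tuple(Relation rel, ItemPointer tid)
             {
                 HeapTupleData tuple;
                 Buffer      buf;

                 tuple.t_self = *tid;    /* heap_fetch reads the TID from t_self */

                 if (heap_fetch(rel, GetActiveSnapshot(), &tuple, &buf, false))
                 {
                     /*
                      * tuple.t_data now points into the pinned buffer; once we are
                      * done with the tuple, the pin must be dropped.
                      */
                     ReleaseBuffer(buf);
                     return true;
                 }

                 /* not found or invisible: buf is InvalidBuffer, keep_buf was false */
                 return false;
             }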
    1606             : /*
    1607             :  *  heap_hot_search_buffer  - search HOT chain for tuple satisfying snapshot
    1608             :  *
    1609             :  * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
    1610             :  * of a HOT chain), and buffer is the buffer holding this tuple.  We search
    1611             :  * for the first chain member satisfying the given snapshot.  If one is
    1612             :  * found, we update *tid to reference that tuple's offset number, and
    1613             :  * return true.  If no match, return false without modifying *tid.
    1614             :  *
    1615             :  * heapTuple is a caller-supplied buffer.  When a match is found, we return
    1616             :  * the tuple here, in addition to updating *tid.  If no match is found, the
    1617             :  * contents of this buffer on return are undefined.
    1618             :  *
    1619             :  * If all_dead is not NULL, we check non-visible tuples to see if they are
    1620             :  * globally dead; *all_dead is set true if all members of the HOT chain
    1621             :  * are vacuumable, false if not.
    1622             :  *
    1623             :  * Unlike heap_fetch, the caller must already have pin and (at least) share
    1624             :  * lock on the buffer; it is still pinned/locked at exit.
    1625             :  */
    1626             : bool
    1627    37853398 : heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
    1628             :                        Snapshot snapshot, HeapTuple heapTuple,
    1629             :                        bool *all_dead, bool first_call)
    1630             : {
    1631    37853398 :     Page        page = BufferGetPage(buffer);
    1632    37853398 :     TransactionId prev_xmax = InvalidTransactionId;
    1633             :     BlockNumber blkno;
    1634             :     OffsetNumber offnum;
    1635             :     bool        at_chain_start;
    1636             :     bool        valid;
    1637             :     bool        skip;
    1638    37853398 :     GlobalVisState *vistest = NULL;
    1639             : 
    1640             :     /* If this is not the first call, previous call returned a (live!) tuple */
    1641    37853398 :     if (all_dead)
    1642    32177350 :         *all_dead = first_call;
    1643             : 
    1644    37853398 :     blkno = ItemPointerGetBlockNumber(tid);
    1645    37853398 :     offnum = ItemPointerGetOffsetNumber(tid);
    1646    37853398 :     at_chain_start = first_call;
    1647    37853398 :     skip = !first_call;
    1648             : 
    1649             :     /* XXX: we should assert that a snapshot is pushed or registered */
    1650             :     Assert(TransactionIdIsValid(RecentXmin));
    1651             :     Assert(BufferGetBlockNumber(buffer) == blkno);
    1652             : 
    1653             :     /* Scan through possible multiple members of HOT-chain */
    1654             :     for (;;)
    1655     1618246 :     {
    1656             :         ItemId      lp;
    1657             : 
    1658             :         /* check for bogus TID */
    1659    39471644 :         if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
    1660             :             break;
    1661             : 
    1662    39471644 :         lp = PageGetItemId(page, offnum);
    1663             : 
    1664             :         /* check for unused, dead, or redirected items */
    1665    39471644 :         if (!ItemIdIsNormal(lp))
    1666             :         {
    1667             :             /* We should only see a redirect at start of chain */
    1668     1299000 :             if (ItemIdIsRedirected(lp) && at_chain_start)
    1669             :             {
    1670             :                 /* Follow the redirect */
    1671      614392 :                 offnum = ItemIdGetRedirect(lp);
    1672      614392 :                 at_chain_start = false;
    1673      614392 :                 continue;
    1674             :             }
    1675             :             /* else must be end of chain */
    1676      684608 :             break;
    1677             :         }
    1678             : 
    1679             :         /*
    1680             :          * Update heapTuple to point to the element of the HOT chain we're
    1681             :          * currently investigating. Having t_self set correctly is important
    1682             :          * because the SSI checks and the *Satisfies routine for historical
    1683             :      * MVCC snapshots need the correct tid to decide on visibility.
    1684             :          */
    1685    38172644 :         heapTuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
    1686    38172644 :         heapTuple->t_len = ItemIdGetLength(lp);
    1687    38172644 :         heapTuple->t_tableOid = RelationGetRelid(relation);
    1688    38172644 :         ItemPointerSet(&heapTuple->t_self, blkno, offnum);
    1689             : 
    1690             :         /*
    1691             :          * Shouldn't see a HEAP_ONLY tuple at chain start.
    1692             :          */
    1693    38172644 :         if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
    1694           0 :             break;
    1695             : 
    1696             :         /*
    1697             :          * The xmin should match the previous xmax value, else chain is
    1698             :          * broken.
    1699             :          */
    1700    39176498 :         if (TransactionIdIsValid(prev_xmax) &&
    1701     1003854 :             !TransactionIdEquals(prev_xmax,
    1702             :                                  HeapTupleHeaderGetXmin(heapTuple->t_data)))
    1703           0 :             break;
    1704             : 
    1705             :         /*
    1706             :          * When first_call is true (and thus, skip is initially false) we'll
    1707             :          * return the first tuple we find.  But on later passes, heapTuple
    1708             :          * will initially be pointing to the tuple we returned last time.
    1709             :          * Returning it again would be incorrect (and would loop forever), so
    1710             :          * we skip it and return the next match we find.
    1711             :          */
    1712    38172644 :         if (!skip)
    1713             :         {
    1714             :             /* If it's visible per the snapshot, we must return it */
    1715    38021202 :             valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
    1716    38021202 :             HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
    1717             :                                                 buffer, snapshot);
    1718             : 
    1719    38021192 :             if (valid)
    1720             :             {
    1721    24840968 :                 ItemPointerSetOffsetNumber(tid, offnum);
    1722    24840968 :                 PredicateLockTID(relation, &heapTuple->t_self, snapshot,
    1723    24840968 :                                  HeapTupleHeaderGetXmin(heapTuple->t_data));
    1724    24840968 :                 if (all_dead)
    1725    19700596 :                     *all_dead = false;
    1726    24840968 :                 return true;
    1727             :             }
    1728             :         }
    1729    13331666 :         skip = false;
    1730             : 
    1731             :         /*
    1732             :          * If we can't see it, maybe no one else can either.  At caller
    1733             :          * request, check whether all chain members are dead to all
    1734             :          * transactions.
    1735             :          *
    1736             :          * Note: if you change the criterion here for what is "dead", fix the
    1737             :          * planner's get_actual_variable_range() function to match.
    1738             :          */
    1739    13331666 :         if (all_dead && *all_dead)
    1740             :         {
    1741    12689306 :             if (!vistest)
    1742    12507552 :                 vistest = GlobalVisTestFor(relation);
    1743             : 
    1744    12689306 :             if (!HeapTupleIsSurelyDead(heapTuple, vistest))
    1745    12112444 :                 *all_dead = false;
    1746             :         }
    1747             : 
    1748             :         /*
    1749             :          * Check to see if HOT chain continues past this tuple; if so fetch
    1750             :          * the next offnum and loop around.
    1751             :          */
    1752    13331666 :         if (HeapTupleIsHotUpdated(heapTuple))
    1753             :         {
    1754             :             Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
    1755             :                    blkno);
    1756     1003854 :             offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
    1757     1003854 :             at_chain_start = false;
    1758     1003854 :             prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
    1759             :         }
    1760             :         else
    1761             :             break;              /* end of chain */
    1762             :     }
    1763             : 
    1764    13012420 :     return false;
    1765             : }
    1766             : 
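
             A sketch of a direct caller, following the pin-and-lock contract described
             in the header comment above.  Illustrative only: index AMs normally reach
             this code via heapam_index_fetch_tuple(), a registered snapshot is
             assumed, and the helper name is invented.

             #include "postgres.h"

             #include "access/heapam.h"
             #include "storage/bufmgr.h"

             /* hypothetical helper, not part of heapam.c */
             static bool
             hot_chain_lookup(Relation rel, ItemPointer tid, Snapshot snapshot)
             {
                 Buffer      buf;
                 HeapTupleData heapTuple;
                 bool        all_dead;
                 bool        found;

                 /* caller must supply pin and (at least) share lock on the buffer */
                 buf = ReadBuffer(rel, ItemPointerGetBlockNumber(tid));
                 LockBuffer(buf, BUFFER_LOCK_SHARE);

                 /* on success, *tid is updated to the visible chain member's offset */
                 found = heap_hot_search_buffer(tid, rel, buf, snapshot, &heapTuple,
                                                &all_dead, true);

                 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
                 ReleaseBuffer(buf);

                 return found;
             }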
    1767             : /*
    1768             :  *  heap_get_latest_tid -  get the latest tid of a specified tuple
    1769             :  *
    1770             :  * Actually, this gets the latest version that is visible according to the
    1771             :  * scan's snapshot.  Create a scan using SnapshotDirty to get the very latest,
    1772             :  * possibly uncommitted version.
    1773             :  *
    1774             :  * *tid is both an input and an output parameter: it is updated to
    1775             :  * show the latest version of the row.  Note that it will not be changed
    1776             :  * if no version of the row passes the snapshot test.
    1777             :  */
    1778             : void
    1779         294 : heap_get_latest_tid(TableScanDesc sscan,
    1780             :                     ItemPointer tid)
    1781             : {
    1782         294 :     Relation    relation = sscan->rs_rd;
    1783         294 :     Snapshot    snapshot = sscan->rs_snapshot;
    1784             :     ItemPointerData ctid;
    1785             :     TransactionId priorXmax;
    1786             : 
    1787             :     /*
    1788             :      * table_tuple_get_latest_tid() verified that the passed in tid is valid.
    1789             :      * Assume that t_ctid links are valid however - there shouldn't be invalid
    1790             :      * ones in the table.
    1791             :      */
    1792             :     Assert(ItemPointerIsValid(tid));
    1793             : 
    1794             :     /*
    1795             :      * Loop to chase down t_ctid links.  At top of loop, ctid is the tuple we
    1796             :      * need to examine, and *tid is the TID we will return if ctid turns out
    1797             :      * to be bogus.
    1798             :      *
    1799             :      * Note that we will loop until we reach the end of the t_ctid chain.
    1800             :      * Depending on the snapshot passed, there might be at most one visible
    1801             :      * version of the row, but we don't try to optimize for that.
    1802             :      */
    1803         294 :     ctid = *tid;
    1804         294 :     priorXmax = InvalidTransactionId;   /* cannot check first XMIN */
    1805             :     for (;;)
    1806          90 :     {
    1807             :         Buffer      buffer;
    1808             :         Page        page;
    1809             :         OffsetNumber offnum;
    1810             :         ItemId      lp;
    1811             :         HeapTupleData tp;
    1812             :         bool        valid;
    1813             : 
    1814             :         /*
    1815             :          * Read, pin, and lock the page.
    1816             :          */
    1817         384 :         buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
    1818         384 :         LockBuffer(buffer, BUFFER_LOCK_SHARE);
    1819         384 :         page = BufferGetPage(buffer);
    1820             : 
    1821             :         /*
    1822             :          * Check for bogus item number.  This is not treated as an error
    1823             :          * condition because it can happen while following a t_ctid link. We
    1824             :          * just assume that the prior tid is OK and return it unchanged.
    1825             :          */
    1826         384 :         offnum = ItemPointerGetOffsetNumber(&ctid);
    1827         384 :         if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
    1828             :         {
    1829           0 :             UnlockReleaseBuffer(buffer);
    1830           0 :             break;
    1831             :         }
    1832         384 :         lp = PageGetItemId(page, offnum);
    1833         384 :         if (!ItemIdIsNormal(lp))
    1834             :         {
    1835           0 :             UnlockReleaseBuffer(buffer);
    1836           0 :             break;
    1837             :         }
    1838             : 
    1839             :         /* OK to access the tuple */
    1840         384 :         tp.t_self = ctid;
    1841         384 :         tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
    1842         384 :         tp.t_len = ItemIdGetLength(lp);
    1843         384 :         tp.t_tableOid = RelationGetRelid(relation);
    1844             : 
    1845             :         /*
    1846             :          * After following a t_ctid link, we might arrive at an unrelated
    1847             :          * tuple.  Check for XMIN match.
    1848             :          */
    1849         474 :         if (TransactionIdIsValid(priorXmax) &&
    1850          90 :             !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
    1851             :         {
    1852           0 :             UnlockReleaseBuffer(buffer);
    1853           0 :             break;
    1854             :         }
    1855             : 
    1856             :         /*
    1857             :          * Check tuple visibility; if visible, set it as the new result
    1858             :          * candidate.
    1859             :          */
    1860         384 :         valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
    1861         384 :         HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
    1862         384 :         if (valid)
    1863         270 :             *tid = ctid;
    1864             : 
    1865             :         /*
    1866             :          * If there's a valid t_ctid link, follow it, else we're done.
    1867             :          */
    1868         546 :         if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
    1869         276 :             HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
    1870         228 :             HeapTupleHeaderIndicatesMovedPartitions(tp.t_data) ||
    1871         114 :             ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
    1872             :         {
    1873         294 :             UnlockReleaseBuffer(buffer);
    1874         294 :             break;
    1875             :         }
    1876             : 
    1877          90 :         ctid = tp.t_data->t_ctid;
    1878          90 :         priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
    1879          90 :         UnlockReleaseBuffer(buffer);
    1880             :     }                           /* end of loop */
    1881         294 : }
    1882             : 
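
             The remark above about using SnapshotDirty to see the very latest,
             possibly uncommitted version can be illustrated with a short sketch.
             Illustrative only: the helper name is invented, and it assumes the caller
             has already verified that *tid points at a block that exists in the
             relation, as table_tuple_get_latest_tid() normally does.

             #include "postgres.h"

             #include "access/heapam.h"
             #include "access/tableam.h"
             #include "utils/snapmgr.h"

             /* hypothetical helper, not part of heapam.c */
             static void
             chase_to_very_latest(Relation rel, ItemPointer tid)
             {
                 SnapshotData SnapshotDirty;
                 TableScanDesc scan;

                 InitDirtySnapshot(SnapshotDirty);

                 /* same flags table_beginscan_tid() would use */
                 scan = heap_beginscan(rel, &SnapshotDirty, 0, NULL, NULL,
                                       SO_TYPE_TIDSCAN);

                 /* *tid is advanced to the newest version this snapshot can see */
                 heap_get_latest_tid(scan, tid);
                 heap_endscan(scan);
             }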
    1883             : 
    1884             : /*
    1885             :  * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
    1886             :  *
    1887             :  * This is called after we have waited for the XMAX transaction to terminate.
    1888             :  * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
    1889             :  * be set on exit.  If the transaction committed, we set the XMAX_COMMITTED
    1890             :  * hint bit if possible --- but beware that that may not yet be possible,
    1891             :  * if the transaction committed asynchronously.
    1892             :  *
    1893             :  * Note that if the transaction was a locker only, we set HEAP_XMAX_INVALID
    1894             :  * even if it commits.
    1895             :  *
    1896             :  * Hence callers should look only at XMAX_INVALID.
    1897             :  *
    1898             :  * Note this is not allowed for tuples whose xmax is a multixact.
    1899             :  */
    1900             : static void
    1901         338 : UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
    1902             : {
    1903             :     Assert(TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple), xid));
    1904             :     Assert(!(tuple->t_infomask & HEAP_XMAX_IS_MULTI));
    1905             : 
    1906         338 :     if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
    1907             :     {
    1908         616 :         if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
    1909         278 :             TransactionIdDidCommit(xid))
    1910         226 :             HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
    1911             :                                  xid);
    1912             :         else
    1913         112 :             HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
    1914             :                                  InvalidTransactionId);
    1915             :     }
    1916         338 : }
    1917             : 
    1918             : 
    1919             : /*
    1920             :  * GetBulkInsertState - prepare status object for a bulk insert
    1921             :  */
    1922             : BulkInsertState
    1923        4624 : GetBulkInsertState(void)
    1924             : {
    1925             :     BulkInsertState bistate;
    1926             : 
    1927        4624 :     bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
    1928        4624 :     bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
    1929        4624 :     bistate->current_buf = InvalidBuffer;
    1930        4624 :     bistate->next_free = InvalidBlockNumber;
    1931        4624 :     bistate->last_free = InvalidBlockNumber;
    1932        4624 :     bistate->already_extended_by = 0;
    1933        4624 :     return bistate;
    1934             : }
    1935             : 
    1936             : /*
    1937             :  * FreeBulkInsertState - clean up after finishing a bulk insert
    1938             :  */
    1939             : void
    1940        4394 : FreeBulkInsertState(BulkInsertState bistate)
    1941             : {
    1942        4394 :     if (bistate->current_buf != InvalidBuffer)
    1943        3490 :         ReleaseBuffer(bistate->current_buf);
    1944        4394 :     FreeAccessStrategy(bistate->strategy);
    1945        4394 :     pfree(bistate);
    1946        4394 : }
    1947             : 
    1948             : /*
    1949             :  * ReleaseBulkInsertStatePin - release a buffer currently held in bistate
    1950             :  */
    1951             : void
    1952      161512 : ReleaseBulkInsertStatePin(BulkInsertState bistate)
    1953             : {
    1954      161512 :     if (bistate->current_buf != InvalidBuffer)
    1955       60042 :         ReleaseBuffer(bistate->current_buf);
    1956      161512 :     bistate->current_buf = InvalidBuffer;
    1957             : 
    1958             :     /*
    1959             :      * Despite the name, we also reset bulk relation extension state.
    1960             :      * Otherwise we can end up erroring out due to looking for free space in
    1961             :      * ->next_free of one partition, even though ->next_free was set when
    1962             :      * extending another partition. It could obviously also be bad for
    1963             :      * efficiency to look at existing blocks at offsets from another
    1964             :      * partition, even if we don't error out.
    1965             :      */
    1966      161512 :     bistate->next_free = InvalidBlockNumber;
    1967      161512 :     bistate->last_free = InvalidBlockNumber;
    1968      161512 : }
    1969             : 
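Taken together, the three routines above define the full lifecycle of a bulk
insert.  As a point of orientation, here is a minimal sketch, assuming the
caller already holds an adequate lock on "rel" and has built its own
HeapTuples; the function name and loop are illustrative only and not part of
heapam.c.  Callers such as COPY additionally call ReleaseBulkInsertStatePin()
whenever the target partition changes, which is the partition-switch case the
comment above describes.

    #include "postgres.h"
    #include "access/heapam.h"
    #include "access/xact.h"

    /* Sketch: load a batch of pre-built tuples through one BulkInsertState. */
    static void
    bulk_insert_sketch(Relation rel, HeapTuple *tuples, int ntuples)
    {
        BulkInsertState bistate = GetBulkInsertState();
        CommandId   cid = GetCurrentCommandId(true);

        for (int i = 0; i < ntuples; i++)
            heap_insert(rel, tuples[i], cid, 0, bistate);

        /* Drops the kept buffer pin and frees the bulk-write buffer strategy. */
        FreeBulkInsertState(bistate);
    }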
    1970             : 
    1971             : /*
    1972             :  *  heap_insert     - insert tuple into a heap
    1973             :  *
    1974             :  * The new tuple is stamped with current transaction ID and the specified
    1975             :  * command ID.
    1976             :  *
    1977             :  * See table_tuple_insert for comments about most of the input flags, except
    1978             :  * that this routine directly takes a tuple rather than a slot.
    1979             :  *
    1980             :  * There are corresponding HEAP_INSERT_ options for all the TABLE_INSERT_
    1981             :  * options, and there is additionally HEAP_INSERT_SPECULATIVE, which is used
    1982             :  * to implement table_tuple_insert_speculative().
    1983             :  *
    1984             :  * On return the header fields of *tup are updated to match the stored tuple;
    1985             :  * in particular tup->t_self receives the actual TID where the tuple was
    1986             :  * stored.  But note that any toasting of fields within the tuple data is NOT
    1987             :  * reflected into *tup.
    1988             :  */
    1989             : void
    1990    15115766 : heap_insert(Relation relation, HeapTuple tup, CommandId cid,
    1991             :             int options, BulkInsertState bistate)
    1992             : {
    1993    15115766 :     TransactionId xid = GetCurrentTransactionId();
    1994             :     HeapTuple   heaptup;
    1995             :     Buffer      buffer;
    1996    15115766 :     Buffer      vmbuffer = InvalidBuffer;
    1997    15115766 :     bool        all_visible_cleared = false;
    1998             : 
    1999             :     /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
    2000             :     Assert(HeapTupleHeaderGetNatts(tup->t_data) <=
    2001             :            RelationGetNumberOfAttributes(relation));
    2002             : 
    2003             :     /*
    2004             :      * Fill in tuple header fields and toast the tuple if necessary.
    2005             :      *
    2006             :      * Note: below this point, heaptup is the data we actually intend to store
    2007             :      * into the relation; tup is the caller's original untoasted data.
    2008             :      */
    2009    15115766 :     heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
    2010             : 
    2011             :     /*
    2012             :      * Find buffer to insert this tuple into.  If the page is all visible,
    2013             :      * this will also pin the requisite visibility map page.
    2014             :      */
    2015    15115766 :     buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
    2016             :                                        InvalidBuffer, options, bistate,
    2017             :                                        &vmbuffer, NULL,
    2018             :                                        0);
    2019             : 
    2020             :     /*
    2021             :      * We're about to do the actual insert -- but check for conflict first, to
    2022             :      * avoid possibly having to roll back work we've just done.
    2023             :      *
    2024             :      * This is safe without a recheck as long as there is no possibility of
    2025             :      * another process scanning the page between this check and the insert
    2026             :      * being visible to the scan (i.e., an exclusive buffer content lock is
    2027             :      * continuously held from this point until the tuple insert is visible).
    2028             :      *
    2029             :      * For a heap insert, we only need to check for table-level SSI locks. Our
    2030             :      * new tuple can't possibly conflict with existing tuple locks, and heap
    2031             :      * page locks are only consolidated versions of tuple locks; they do not
    2032             :      * lock "gaps" as index page locks do.  So we don't need to specify a
    2033             :      * buffer when making the call, which makes for a faster check.
    2034             :      */
    2035    15115766 :     CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
    2036             : 
    2037             :     /* NO EREPORT(ERROR) from here till changes are logged */
    2038    15115742 :     START_CRIT_SECTION();
    2039             : 
    2040    15115742 :     RelationPutHeapTuple(relation, buffer, heaptup,
    2041    15115742 :                          (options & HEAP_INSERT_SPECULATIVE) != 0);
    2042             : 
    2043    15115742 :     if (PageIsAllVisible(BufferGetPage(buffer)))
    2044             :     {
    2045       11386 :         all_visible_cleared = true;
    2046       11386 :         PageClearAllVisible(BufferGetPage(buffer));
    2047       11386 :         visibilitymap_clear(relation,
    2048       11386 :                             ItemPointerGetBlockNumber(&(heaptup->t_self)),
    2049             :                             vmbuffer, VISIBILITYMAP_VALID_BITS);
    2050             :     }
    2051             : 
    2052             :     /*
    2053             :      * XXX Should we set PageSetPrunable on this page?
    2054             :      *
    2055             :      * The inserting transaction may eventually abort thus making this tuple
    2056             :      * DEAD and hence available for pruning. Though we don't want to optimize
    2057             :      * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
    2058             :      * aborted tuple will never be pruned until next vacuum is triggered.
    2059             :      *
    2060             :      * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
    2061             :      */
    2062             : 
    2063    15115742 :     MarkBufferDirty(buffer);
    2064             : 
    2065             :     /* XLOG stuff */
    2066    15115742 :     if (RelationNeedsWAL(relation))
    2067             :     {
    2068             :         xl_heap_insert xlrec;
    2069             :         xl_heap_header xlhdr;
    2070             :         XLogRecPtr  recptr;
    2071    13121760 :         Page        page = BufferGetPage(buffer);
    2072    13121760 :         uint8       info = XLOG_HEAP_INSERT;
    2073    13121760 :         int         bufflags = 0;
    2074             : 
    2075             :         /*
    2076             :          * If this is a catalog, we need to transmit combo CIDs to properly
    2077             :          * decode, so log that as well.
    2078             :          */
    2079    13121760 :         if (RelationIsAccessibleInLogicalDecoding(relation))
    2080        5678 :             log_heap_new_cid(relation, heaptup);
    2081             : 
    2082             :         /*
    2083             :          * If this is the first and only tuple on the page, we can reinit the
    2084             :          * page instead of restoring the whole thing.  Set the flag, and hide
    2085             :          * buffer references from XLogInsert.
    2086             :          */
    2087    13277760 :         if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
    2088      156000 :             PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
    2089             :         {
    2090      154948 :             info |= XLOG_HEAP_INIT_PAGE;
    2091      154948 :             bufflags |= REGBUF_WILL_INIT;
    2092             :         }
    2093             : 
    2094    13121760 :         xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
    2095    13121760 :         xlrec.flags = 0;
    2096    13121760 :         if (all_visible_cleared)
    2097       11380 :             xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
    2098    13121760 :         if (options & HEAP_INSERT_SPECULATIVE)
    2099        4014 :             xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
    2100             :         Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
    2101             : 
    2102             :         /*
    2103             :          * For logical decoding, we need the tuple even if we're doing a full
    2104             :          * page write, so make sure it's included even if we take a full-page
    2105             :          * image. (XXX We could alternatively store a pointer into the FPW).
    2106             :          */
    2107    13121760 :         if (RelationIsLogicallyLogged(relation) &&
    2108      489382 :             !(options & HEAP_INSERT_NO_LOGICAL))
    2109             :         {
    2110      489328 :             xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
    2111      489328 :             bufflags |= REGBUF_KEEP_DATA;
    2112             : 
    2113      489328 :             if (IsToastRelation(relation))
    2114        3368 :                 xlrec.flags |= XLH_INSERT_ON_TOAST_RELATION;
    2115             :         }
    2116             : 
    2117    13121760 :         XLogBeginInsert();
    2118    13121760 :         XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
    2119             : 
    2120    13121760 :         xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
    2121    13121760 :         xlhdr.t_infomask = heaptup->t_data->t_infomask;
    2122    13121760 :         xlhdr.t_hoff = heaptup->t_data->t_hoff;
    2123             : 
    2124             :         /*
    2125             :          * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
    2126             :          * write the whole page to the xlog, we don't need to store
    2127             :          * xl_heap_header in the xlog.
    2128             :          */
    2129    13121760 :         XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
    2130    13121760 :         XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
    2131             :         /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
    2132    13121760 :         XLogRegisterBufData(0,
    2133    13121760 :                             (char *) heaptup->t_data + SizeofHeapTupleHeader,
    2134    13121760 :                             heaptup->t_len - SizeofHeapTupleHeader);
    2135             : 
    2136             :         /* filtering by origin on a row level is much more efficient */
    2137    13121760 :         XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    2138             : 
    2139    13121760 :         recptr = XLogInsert(RM_HEAP_ID, info);
    2140             : 
    2141    13121760 :         PageSetLSN(page, recptr);
    2142             :     }
    2143             : 
    2144    15115742 :     END_CRIT_SECTION();
    2145             : 
    2146    15115742 :     UnlockReleaseBuffer(buffer);
    2147    15115742 :     if (vmbuffer != InvalidBuffer)
    2148       11656 :         ReleaseBuffer(vmbuffer);
    2149             : 
    2150             :     /*
    2151             :      * If tuple is cachable, mark it for invalidation from the caches in case
    2152             :      * we abort.  Note it is OK to do this after releasing the buffer, because
    2153             :      * the heaptup data structure is all in local memory, not in the shared
    2154             :      * buffer.
    2155             :      */
    2156    15115742 :     CacheInvalidateHeapTuple(relation, heaptup, NULL);
    2157             : 
    2158             :     /* Note: speculative insertions are counted too, even if aborted later */
    2159    15115742 :     pgstat_count_heap_insert(relation, 1);
    2160             : 
    2161             :     /*
    2162             :      * If heaptup is a private copy, release it.  Don't forget to copy t_self
    2163             :      * back to the caller's image, too.
    2164             :      */
    2165    15115742 :     if (heaptup != tup)
    2166             :     {
    2167       31652 :         tup->t_self = heaptup->t_self;
    2168       31652 :         heap_freetuple(heaptup);
    2169             :     }
    2170    15115742 : }
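
To make the interface comments above concrete, here is a minimal sketch of
forming a tuple and handing it to heap_insert().  The relation OID, the
assumed (int4, text) column layout, and the values are illustrative
assumptions; ordinary callers go through the table AM layer
(table_tuple_insert) instead, and must in any case create index entries
separately, since heap_insert() touches only the heap.

    #include "postgres.h"
    #include "access/heapam.h"
    #include "access/htup_details.h"
    #include "access/table.h"
    #include "access/xact.h"
    #include "utils/builtins.h"
    #include "utils/rel.h"

    /* Sketch: insert one row into a table assumed to have (int4, text) columns. */
    static void
    insert_one_row_sketch(Oid relid)
    {
        Relation    rel = table_open(relid, RowExclusiveLock);
        Datum       values[2];
        bool        nulls[2] = {false, false};
        HeapTuple   tup;

        values[0] = Int32GetDatum(42);
        values[1] = CStringGetTextDatum("hello");

        tup = heap_form_tuple(RelationGetDescr(rel), values, nulls);
        heap_insert(rel, tup, GetCurrentCommandId(true), 0, NULL);

        /* tup->t_self now carries the TID chosen by heap_insert(). */
        heap_freetuple(tup);
        table_close(rel, RowExclusiveLock);
    }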
    2171             : 
    2172             : /*
    2173             :  * Subroutine for heap_insert(). Prepares a tuple for insertion. This sets the
    2174             :  * tuple header fields and toasts the tuple if necessary.  Returns a toasted
    2175             :  * version of the tuple if it was toasted, or the original tuple if not. Note
    2176             :  * that in any case, the header fields are also set in the original tuple.
    2177             :  */
    2178             : static HeapTuple
    2179    17837172 : heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
    2180             :                     CommandId cid, int options)
    2181             : {
    2182             :     /*
    2183             :      * To allow parallel inserts, we need to ensure that they are safe to be
    2184             :      * performed in workers. We have the infrastructure to allow parallel
    2185             :      * inserts in general except for the cases where inserts generate a new
    2186             :      * CommandId (e.g., inserts into a table having a foreign key column).
    2187             :      */
    2188    17837172 :     if (IsParallelWorker())
    2189           0 :         ereport(ERROR,
    2190             :                 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
    2191             :                  errmsg("cannot insert tuples in a parallel worker")));
    2192             : 
    2193    17837172 :     tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
    2194    17837172 :     tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
    2195    17837172 :     tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
    2196    17837172 :     HeapTupleHeaderSetXmin(tup->t_data, xid);
    2197    17837172 :     if (options & HEAP_INSERT_FROZEN)
    2198      200724 :         HeapTupleHeaderSetXminFrozen(tup->t_data);
    2199             : 
    2200    17837172 :     HeapTupleHeaderSetCmin(tup->t_data, cid);
    2201    17837172 :     HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
    2202    17837172 :     tup->t_tableOid = RelationGetRelid(relation);
    2203             : 
    2204             :     /*
    2205             :      * If the new tuple is too big for storage or contains already toasted
    2206             :      * out-of-line attributes from some other relation, invoke the toaster.
    2207             :      */
    2208    17837172 :     if (relation->rd_rel->relkind != RELKIND_RELATION &&
    2209       55230 :         relation->rd_rel->relkind != RELKIND_MATVIEW)
    2210             :     {
    2211             :         /* toast table entries should never be recursively toasted */
    2212             :         Assert(!HeapTupleHasExternal(tup));
    2213       51906 :         return tup;
    2214             :     }
    2215    17785266 :     else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
    2216       31734 :         return heap_toast_insert_or_update(relation, tup, NULL, options);
    2217             :     else
    2218    17753532 :         return tup;
    2219             : }
    2220             : 
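
For scale: with the default 8 kB block size, TOAST_TUPLE_THRESHOLD works out
to roughly 2 kB (about a quarter of a page), so a tuple longer than that, or
one already carrying toasted out-of-line attributes from some other relation,
is routed through heap_toast_insert_or_update() here, while everything smaller
is returned unchanged.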
    2221             : /*
    2222             :  * Helper for heap_multi_insert() that computes the number of entire pages
    2223             :  * that inserting the remaining heaptuples requires. Used to determine how
    2224             :  * much the relation needs to be extended by.
    2225             :  */
    2226             : static int
    2227      622496 : heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace)
    2228             : {
    2229      622496 :     size_t      page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
    2230      622496 :     int         npages = 1;
    2231             : 
    2232     4519874 :     for (int i = done; i < ntuples; i++)
    2233             :     {
    2234     3897378 :         size_t      tup_sz = sizeof(ItemIdData) + MAXALIGN(heaptuples[i]->t_len);
    2235             : 
    2236     3897378 :         if (page_avail < tup_sz)
    2237             :         {
    2238       31034 :             npages++;
    2239       31034 :             page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
    2240             :         }
    2241     3897378 :         page_avail -= tup_sz;
    2242             :     }
    2243             : 
    2244      622496 :     return npages;
    2245             : }
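
A rough worked example of this computation, assuming the default 8 kB BLCKSZ,
the 24-byte SizeOfPageHeaderData, fillfactor 100 (so saveFreeSpace is 0), and
121-byte tuples on a platform whose MAXALIGN is 8: each tuple costs
sizeof(ItemIdData) + MAXALIGN(121) = 4 + 128 = 132 bytes, page_avail starts at
8192 - 24 = 8168, and 61 such tuples fit before page_avail drops below 132, so
1000 remaining tuples yield npages = 17.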
    2246             : 
    2247             : /*
    2248             :  *  heap_multi_insert   - insert multiple tuples into a heap
    2249             :  *
    2250             :  * This is like heap_insert(), but inserts multiple tuples in one operation.
    2251             :  * That's faster than calling heap_insert() in a loop, because when multiple
    2252             :  * tuples can be inserted on a single page, we can write just a single WAL
    2253             :  * record covering all of them, and only need to lock/unlock the page once.
    2254             :  *
    2255             :  * Note: this leaks memory into the current memory context. You can create a
    2256             :  * temporary context before calling this, if that's a problem.
    2257             :  */
    2258             : void
    2259      611018 : heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
    2260             :                   CommandId cid, int options, BulkInsertState bistate)
    2261             : {
    2262      611018 :     TransactionId xid = GetCurrentTransactionId();
    2263             :     HeapTuple  *heaptuples;
    2264             :     int         i;
    2265             :     int         ndone;
    2266             :     PGAlignedBlock scratch;
    2267             :     Page        page;
    2268      611018 :     Buffer      vmbuffer = InvalidBuffer;
    2269             :     bool        needwal;
    2270             :     Size        saveFreeSpace;
    2271      611018 :     bool        need_tuple_data = RelationIsLogicallyLogged(relation);
    2272      611018 :     bool        need_cids = RelationIsAccessibleInLogicalDecoding(relation);
    2273      611018 :     bool        starting_with_empty_page = false;
    2274      611018 :     int         npages = 0;
    2275      611018 :     int         npages_used = 0;
    2276             : 
    2277             :     /* currently not needed (thus unsupported) for heap_multi_insert() */
    2278             :     Assert(!(options & HEAP_INSERT_NO_LOGICAL));
    2279             : 
    2280      611018 :     needwal = RelationNeedsWAL(relation);
    2281      611018 :     saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
    2282             :                                                    HEAP_DEFAULT_FILLFACTOR);
    2283             : 
    2284             :     /* Toast and set header data in all the slots */
    2285      611018 :     heaptuples = palloc(ntuples * sizeof(HeapTuple));
    2286     3332424 :     for (i = 0; i < ntuples; i++)
    2287             :     {
    2288             :         HeapTuple   tuple;
    2289             : 
    2290     2721406 :         tuple = ExecFetchSlotHeapTuple(slots[i], true, NULL);
    2291     2721406 :         slots[i]->tts_tableOid = RelationGetRelid(relation);
    2292     2721406 :         tuple->t_tableOid = slots[i]->tts_tableOid;
    2293     2721406 :         heaptuples[i] = heap_prepare_insert(relation, tuple, xid, cid,
    2294             :                                             options);
    2295             :     }
    2296             : 
    2297             :     /*
    2298             :      * We're about to do the actual inserts -- but check for conflict first,
    2299             :      * to minimize the possibility of having to roll back work we've just
    2300             :      * done.
    2301             :      *
    2302             :      * A check here does not definitively prevent a serialization anomaly;
    2303             :      * that check MUST be done at least past the point of acquiring an
    2304             :      * exclusive buffer content lock on every buffer that will be affected,
    2305             :      * and MAY be done after all inserts are reflected in the buffers and
    2306             :      * those locks are released; otherwise there is a race condition.  Since
    2307             :      * multiple buffers can be locked and unlocked in the loop below, and it
    2308             :      * would not be feasible to identify and lock all of those buffers before
    2309             :      * the loop, we must do a final check at the end.
    2310             :      *
    2311             :      * The check here could be omitted with no loss of correctness; it is
    2312             :      * present strictly as an optimization.
    2313             :      *
    2314             :      * For heap inserts, we only need to check for table-level SSI locks. Our
    2315             :      * new tuples can't possibly conflict with existing tuple locks, and heap
    2316             :      * page locks are only consolidated versions of tuple locks; they do not
    2317             :      * lock "gaps" as index page locks do.  So we don't need to specify a
    2318             :      * buffer when making the call, which makes for a faster check.
    2319             :      */
    2320      611018 :     CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
    2321             : 
    2322      611018 :     ndone = 0;
    2323     1249672 :     while (ndone < ntuples)
    2324             :     {
    2325             :         Buffer      buffer;
    2326      638654 :         bool        all_visible_cleared = false;
    2327      638654 :         bool        all_frozen_set = false;
    2328             :         int         nthispage;
    2329             : 
    2330      638654 :         CHECK_FOR_INTERRUPTS();
    2331             : 
    2332             :         /*
    2333             :          * Compute number of pages needed to fit the to-be-inserted tuples in
    2334             :          * the worst case.  This will be used to determine how much to extend
    2335             :          * the relation by in RelationGetBufferForTuple(), if needed.  If we
    2336             :          * filled a prior page from scratch, we can just update our last
    2337             :          * computation, but if we started with a partially filled page,
    2338             :          * recompute from scratch, since the number of potentially required
    2339             :          * pages can vary due to tuples needing to fit onto the page, page
    2340             :          * headers, etc.
    2341             :          */
    2342      638654 :         if (ndone == 0 || !starting_with_empty_page)
    2343             :         {
    2344      622496 :             npages = heap_multi_insert_pages(heaptuples, ndone, ntuples,
    2345             :                                              saveFreeSpace);
    2346      622496 :             npages_used = 0;
    2347             :         }
    2348             :         else
    2349       16158 :             npages_used++;
    2350             : 
    2351             :         /*
    2352             :          * Find buffer where at least the next tuple will fit.  If the page is
    2353             :          * all-visible, this will also pin the requisite visibility map page.
    2354             :          *
    2355             :          * Also pin visibility map page if COPY FREEZE inserts tuples into an
    2356             :          * empty page. See all_frozen_set below.
    2357             :          */
    2358      638654 :         buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
    2359             :                                            InvalidBuffer, options, bistate,
    2360             :                                            &vmbuffer, NULL,
    2361             :                                            npages - npages_used);
    2362      638654 :         page = BufferGetPage(buffer);
    2363             : 
    2364      638654 :         starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0;
    2365             : 
    2366      638654 :         if (starting_with_empty_page && (options & HEAP_INSERT_FROZEN))
    2367        3322 :             all_frozen_set = true;
    2368             : 
    2369             :         /* NO EREPORT(ERROR) from here till changes are logged */
    2370      638654 :         START_CRIT_SECTION();
    2371             : 
    2372             :         /*
    2373             :          * RelationGetBufferForTuple has ensured that the first tuple fits.
    2374             :          * Put that on the page, and then as many other tuples as fit.
    2375             :          */
    2376      638654 :         RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
    2377             : 
    2378             :         /*
    2379             :          * For logical decoding we need combo CIDs to properly decode the
    2380             :          * catalog.
    2381             :          */
    2382      638654 :         if (needwal && need_cids)
    2383        8528 :             log_heap_new_cid(relation, heaptuples[ndone]);
    2384             : 
    2385     2721406 :         for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
    2386             :         {
    2387     2110388 :             HeapTuple   heaptup = heaptuples[ndone + nthispage];
    2388             : 
    2389     2110388 :             if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace)
    2390       27636 :                 break;
    2391             : 
    2392     2082752 :             RelationPutHeapTuple(relation, buffer, heaptup, false);
    2393             : 
    2394             :             /*
    2395             :              * For logical decoding we need combo CIDs to properly decode the
    2396             :              * catalog.
    2397             :              */
    2398     2082752 :             if (needwal && need_cids)
    2399        8456 :                 log_heap_new_cid(relation, heaptup);
    2400             :         }
    2401             : 
    2402             :         /*
    2403             :          * If the page is all visible, need to clear that, unless we're only
    2404             :          * going to add further frozen rows to it.
    2405             :          *
    2406             :          * If we're only adding already frozen rows to a previously empty
    2407             :          * page, mark it as all-visible.
    2408             :          */
    2409      638654 :         if (PageIsAllVisible(page) && !(options & HEAP_INSERT_FROZEN))
    2410             :         {
    2411        4432 :             all_visible_cleared = true;
    2412        4432 :             PageClearAllVisible(page);
    2413        4432 :             visibilitymap_clear(relation,
    2414             :                                 BufferGetBlockNumber(buffer),
    2415             :                                 vmbuffer, VISIBILITYMAP_VALID_BITS);
    2416             :         }
    2417      634222 :         else if (all_frozen_set)
    2418        3322 :             PageSetAllVisible(page);
    2419             : 
    2420             :         /*
    2421             :          * XXX Should we set PageSetPrunable on this page?  See heap_insert()
    2422             :          */
    2423             : 
    2424      638654 :         MarkBufferDirty(buffer);
    2425             : 
    2426             :         /* XLOG stuff */
    2427      638654 :         if (needwal)
    2428             :         {
    2429             :             XLogRecPtr  recptr;
    2430             :             xl_heap_multi_insert *xlrec;
    2431      631018 :             uint8       info = XLOG_HEAP2_MULTI_INSERT;
    2432             :             char       *tupledata;
    2433             :             int         totaldatalen;
    2434      631018 :             char       *scratchptr = scratch.data;
    2435             :             bool        init;
    2436      631018 :             int         bufflags = 0;
    2437             : 
    2438             :             /*
    2439             :              * If the page was previously empty, we can reinit the page
    2440             :              * instead of restoring the whole thing.
    2441             :              */
    2442      631018 :             init = starting_with_empty_page;
    2443             : 
    2444             :             /* allocate xl_heap_multi_insert struct from the scratch area */
    2445      631018 :             xlrec = (xl_heap_multi_insert *) scratchptr;
    2446      631018 :             scratchptr += SizeOfHeapMultiInsert;
    2447             : 
    2448             :             /*
    2449             :              * Allocate the offsets array, unless we're reinitializing the
    2450             :              * page; in that case the tuples are stored in order starting at
    2451             :              * FirstOffsetNumber and we don't need to store the offsets
    2452             :              * explicitly.
    2453             :              */
    2454      631018 :             if (!init)
    2455      606258 :                 scratchptr += nthispage * sizeof(OffsetNumber);
    2456             : 
    2457             :             /* the rest of the scratch space is used for tuple data */
    2458      631018 :             tupledata = scratchptr;
    2459             : 
    2460             :             /* check that the mutually exclusive flags are not both set */
    2461             :             Assert(!(all_visible_cleared && all_frozen_set));
    2462             : 
    2463      631018 :             xlrec->flags = 0;
    2464      631018 :             if (all_visible_cleared)
    2465        4432 :                 xlrec->flags = XLH_INSERT_ALL_VISIBLE_CLEARED;
    2466      631018 :             if (all_frozen_set)
    2467          26 :                 xlrec->flags = XLH_INSERT_ALL_FROZEN_SET;
    2468             : 
    2469      631018 :             xlrec->ntuples = nthispage;
    2470             : 
    2471             :             /*
    2472             :              * Write out an xl_multi_insert_tuple and the tuple data itself
    2473             :              * for each tuple.
    2474             :              */
    2475     2941640 :             for (i = 0; i < nthispage; i++)
    2476             :             {
    2477     2310622 :                 HeapTuple   heaptup = heaptuples[ndone + i];
    2478             :                 xl_multi_insert_tuple *tuphdr;
    2479             :                 int         datalen;
    2480             : 
    2481     2310622 :                 if (!init)
    2482     1294148 :                     xlrec->offsets[i] = ItemPointerGetOffsetNumber(&heaptup->t_self);
    2483             :                 /* xl_multi_insert_tuple needs two-byte alignment. */
    2484     2310622 :                 tuphdr = (xl_multi_insert_tuple *) SHORTALIGN(scratchptr);
    2485     2310622 :                 scratchptr = ((char *) tuphdr) + SizeOfMultiInsertTuple;
    2486             : 
    2487     2310622 :                 tuphdr->t_infomask2 = heaptup->t_data->t_infomask2;
    2488     2310622 :                 tuphdr->t_infomask = heaptup->t_data->t_infomask;
    2489     2310622 :                 tuphdr->t_hoff = heaptup->t_data->t_hoff;
    2490             : 
    2491             :                 /* write bitmap [+ padding] [+ oid] + data */
    2492     2310622 :                 datalen = heaptup->t_len - SizeofHeapTupleHeader;
    2493     2310622 :                 memcpy(scratchptr,
    2494     2310622 :                        (char *) heaptup->t_data + SizeofHeapTupleHeader,
    2495             :                        datalen);
    2496     2310622 :                 tuphdr->datalen = datalen;
    2497     2310622 :                 scratchptr += datalen;
    2498             :             }
    2499      631018 :             totaldatalen = scratchptr - tupledata;
    2500             :             Assert((scratchptr - scratch.data) < BLCKSZ);
    2501             : 
    2502      631018 :             if (need_tuple_data)
    2503         146 :                 xlrec->flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
    2504             : 
    2505             :             /*
    2506             :              * Signal that this is the last xl_heap_multi_insert record
    2507             :              * emitted by this call to heap_multi_insert(). Needed for logical
    2508             :              * decoding so it knows when to clean up temporary data.
    2509             :              */
    2510      631018 :             if (ndone + nthispage == ntuples)
    2511      610200 :                 xlrec->flags |= XLH_INSERT_LAST_IN_MULTI;
    2512             : 
    2513      631018 :             if (init)
    2514             :             {
    2515       24760 :                 info |= XLOG_HEAP_INIT_PAGE;
    2516       24760 :                 bufflags |= REGBUF_WILL_INIT;
    2517             :             }
    2518             : 
    2519             :             /*
    2520             :              * If we're doing logical decoding, include the new tuple data
    2521             :              * even if we take a full-page image of the page.
    2522             :              */
    2523      631018 :             if (need_tuple_data)
    2524         146 :                 bufflags |= REGBUF_KEEP_DATA;
    2525             : 
    2526      631018 :             XLogBeginInsert();
    2527      631018 :             XLogRegisterData((char *) xlrec, tupledata - scratch.data);
    2528      631018 :             XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
    2529             : 
    2530      631018 :             XLogRegisterBufData(0, tupledata, totaldatalen);
    2531             : 
    2532             :             /* filtering by origin on a row level is much more efficient */
    2533      631018 :             XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    2534             : 
    2535      631018 :             recptr = XLogInsert(RM_HEAP2_ID, info);
    2536             : 
    2537      631018 :             PageSetLSN(page, recptr);
    2538             :         }
    2539             : 
    2540      638654 :         END_CRIT_SECTION();
    2541             : 
    2542             :         /*
    2543             :          * If we've frozen everything on the page, update the visibility map.
    2544             :          * We're already holding a pin on the vmbuffer.
    2545             :          */
    2546      638654 :         if (all_frozen_set)
    2547             :         {
    2548             :             Assert(PageIsAllVisible(page));
    2549             :             Assert(visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer));
    2550             : 
    2551             :             /*
    2552             :              * It's fine to use InvalidTransactionId here - this is only used
    2553             :              * when HEAP_INSERT_FROZEN is specified, which intentionally
    2554             :              * violates visibility rules.
    2555             :              */
    2556        3322 :             visibilitymap_set(relation, BufferGetBlockNumber(buffer), buffer,
    2557             :                               InvalidXLogRecPtr, vmbuffer,
    2558             :                               InvalidTransactionId,
    2559             :                               VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
    2560             :         }
    2561             : 
    2562      638654 :         UnlockReleaseBuffer(buffer);
    2563      638654 :         ndone += nthispage;
    2564             : 
    2565             :         /*
    2566             :          * NB: Only release vmbuffer after inserting all tuples - it's fairly
    2567             :          * likely that we'll insert into subsequent heap pages that are
    2568             :          * covered by the same vm page.
    2569             :          */
    2570             :     }
    2571             : 
    2572             :     /* We're done with inserting all tuples, so release the last vmbuffer. */
    2573      611018 :     if (vmbuffer != InvalidBuffer)
    2574        4644 :         ReleaseBuffer(vmbuffer);
    2575             : 
    2576             :     /*
    2577             :      * We're done with the actual inserts.  Check for conflicts again, to
    2578             :      * ensure that all rw-conflicts in to these inserts are detected.  Without
    2579             :      * this final check, a sequential scan of the heap may have locked the
    2580             :      * table after the "before" check, missing one opportunity to detect the
    2581             :      * conflict, and then scanned the table before the new tuples were there,
    2582             :      * missing the other chance to detect the conflict.
    2583             :      *
    2584             :      * For heap inserts, we only need to check for table-level SSI locks. Our
    2585             :      * new tuples can't possibly conflict with existing tuple locks, and heap
    2586             :      * page locks are only consolidated versions of tuple locks; they do not
    2587             :      * lock "gaps" as index page locks do.  So we don't need to specify a
    2588             :      * buffer when making the call.
    2589             :      */
    2590      611018 :     CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
    2591             : 
    2592             :     /*
    2593             :      * If tuples are cachable, mark them for invalidation from the caches in
    2594             :      * case we abort.  Note it is OK to do this after releasing the buffer,
    2595             :      * because the heaptuples data structure is all in local memory, not in
    2596             :      * the shared buffer.
    2597             :      */
    2598      611018 :     if (IsCatalogRelation(relation))
    2599             :     {
    2600     2142224 :         for (i = 0; i < ntuples; i++)
    2601     1533526 :             CacheInvalidateHeapTuple(relation, heaptuples[i], NULL);
    2602             :     }
    2603             : 
    2604             :     /* copy t_self fields back to the caller's slots */
    2605     3332424 :     for (i = 0; i < ntuples; i++)
    2606     2721406 :         slots[i]->tts_tid = heaptuples[i]->t_self;
    2607             : 
    2608      611018 :     pgstat_count_heap_insert(relation, ntuples);
    2609      611018 : }
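
For orientation, a sketch of how a caller could drive heap_multi_insert()
directly.  The slot construction below is an illustrative assumption; normal
callers such as COPY reach this code through the table AM wrapper
table_multi_insert() and manage their slots rather differently.

    #include "postgres.h"
    #include "access/heapam.h"
    #include "access/xact.h"
    #include "executor/tuptable.h"
    #include "utils/rel.h"

    /* Sketch: wrap pre-built tuples in heap slots and multi-insert them. */
    static void
    multi_insert_sketch(Relation rel, HeapTuple *tuples, int ntuples)
    {
        TupleTableSlot **slots = palloc(ntuples * sizeof(TupleTableSlot *));
        BulkInsertState bistate = GetBulkInsertState();

        for (int i = 0; i < ntuples; i++)
        {
            slots[i] = MakeSingleTupleTableSlot(RelationGetDescr(rel),
                                                &TTSOpsHeapTuple);
            ExecStoreHeapTuple(tuples[i], slots[i], false);
        }

        heap_multi_insert(rel, slots, ntuples, GetCurrentCommandId(true),
                          0, bistate);

        for (int i = 0; i < ntuples; i++)
            ExecDropSingleTupleTableSlot(slots[i]);
        FreeBulkInsertState(bistate);
        pfree(slots);
    }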
    2610             : 
    2611             : /*
    2612             :  *  simple_heap_insert - insert a tuple
    2613             :  *
    2614             :  * Currently, this routine differs from heap_insert only in supplying
    2615             :  * a default command ID and not allowing access to the speedup options.
    2616             :  *
    2617             :  * This should be used rather than using heap_insert directly in most places
    2618             :  * where we are modifying system catalogs.
    2619             :  */
    2620             : void
    2621     1432998 : simple_heap_insert(Relation relation, HeapTuple tup)
    2622             : {
    2623     1432998 :     heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
    2624     1432998 : }
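
In practice, catalog code rarely calls simple_heap_insert() by hand; the usual
entry point is CatalogTupleInsert() from catalog/indexing.c, which wraps it
and also maintains the catalog's indexes.  A minimal sketch, assuming the
caller supplies the opened catalog relation and a formed tuple:

    #include "postgres.h"
    #include "access/heapam.h"
    #include "catalog/indexing.h"

    /* Sketch: the usual way a formed tuple reaches a system catalog. */
    static void
    catalog_insert_sketch(Relation catrel, HeapTuple tup)
    {
        /* Does simple_heap_insert() plus the catalog's index entries. */
        CatalogTupleInsert(catrel, tup);
    }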
    2625             : 
    2626             : /*
    2627             :  * Given infomask/infomask2, compute the bits that must be saved in the
    2628             :  * "infobits" field of xl_heap_delete, xl_heap_update, xl_heap_lock,
    2629             :  * xl_heap_lock_updated WAL records.
    2630             :  *
    2631             :  * See fix_infomask_from_infobits.
    2632             :  */
    2633             : static uint8
    2634     3603346 : compute_infobits(uint16 infomask, uint16 infomask2)
    2635             : {
    2636             :     return
    2637     3603346 :         ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
    2638     3603346 :         ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
    2639     3603346 :         ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
    2640             :     /* note we ignore HEAP_XMAX_SHR_LOCK here */
    2641     7206692 :         ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
    2642             :         ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
    2643     3603346 :          XLHL_KEYS_UPDATED : 0);
    2644             : }
    2645             : 
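
Since compute_infobits() squeezes several infomask bits into a single byte for
the WAL record, it may help to see the reverse mapping spelled out.  The
sketch below is illustrative only; the real counterpart is
fix_infomask_from_infobits(), which lives with the redo routines later in this
file and adjusts an existing infomask rather than building one from zero.
Note also why HEAP_XMAX_SHR_LOCK needs no bit of its own: it is defined as the
combination of the EXCL and KEYSHR lock bits, so restoring those two
reconstructs it.

    #include "postgres.h"
    #include "access/heapam_xlog.h"
    #include "access/htup_details.h"

    /* Sketch: expand an "infobits" byte back into infomask/infomask2 bits. */
    static void
    infobits_to_infomask_sketch(uint8 infobits,
                                uint16 *infomask, uint16 *infomask2)
    {
        *infomask = 0;
        *infomask2 = 0;

        if (infobits & XLHL_XMAX_IS_MULTI)
            *infomask |= HEAP_XMAX_IS_MULTI;
        if (infobits & XLHL_XMAX_LOCK_ONLY)
            *infomask |= HEAP_XMAX_LOCK_ONLY;
        if (infobits & XLHL_XMAX_EXCL_LOCK)
            *infomask |= HEAP_XMAX_EXCL_LOCK;
        if (infobits & XLHL_XMAX_KEYSHR_LOCK)
            *infomask |= HEAP_XMAX_KEYSHR_LOCK;
        if (infobits & XLHL_KEYS_UPDATED)
            *infomask2 |= HEAP_KEYS_UPDATED;
    }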
    2646             : /*
    2647             :  * Given two versions of the same t_infomask for a tuple, compare them and
    2648             :  * return whether the relevant status for a tuple Xmax has changed.  This is
    2649             :  * used after a buffer lock has been released and reacquired: we want to ensure
    2650             :  * that the tuple state continues to be the same as it was when we previously
    2651             :  * examined it.
    2652             :  *
    2653             :  * Note the Xmax field itself must be compared separately.
    2654             :  */
    2655             : static inline bool
    2656       10640 : xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
    2657             : {
    2658       10640 :     const uint16 interesting =
    2659             :         HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
    2660             : 
    2661       10640 :     if ((new_infomask & interesting) != (old_infomask & interesting))
    2662          28 :         return true;
    2663             : 
    2664       10612 :     return false;
    2665             : }
    2666             : 
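heap_delete(), which follows, reports its outcome as a TM_Result, filling in
TM_FailureData for the failure cases as described in its header comment.  A
much-simplified sketch of how a caller might react (this is not the executor's
logic in nodeModifyTable.c, which additionally handles EvalPlanQual rechecks
for READ COMMITTED):

    #include "postgres.h"
    #include "access/heapam.h"
    #include "access/tableam.h"
    #include "access/xact.h"
    #include "utils/snapmgr.h"

    /* Sketch: delete the tuple at *tid and react to the result code. */
    static void
    delete_tuple_sketch(Relation rel, ItemPointer tid)
    {
        TM_FailureData tmfd;
        TM_Result   result;

        result = heap_delete(rel, tid, GetCurrentCommandId(true),
                             InvalidSnapshot,   /* no crosscheck snapshot */
                             true,              /* wait for concurrent xacts */
                             &tmfd,
                             false);            /* not a partition move */

        switch (result)
        {
            case TM_Ok:
                break;          /* deleted; tmfd is not filled in */
            case TM_SelfModified:
                /* already modified by this command; tmfd.cmax is valid */
                break;
            case TM_Updated:
            case TM_Deleted:
                /* concurrently updated/deleted; tmfd.ctid/xmax say by what */
                ereport(ERROR,
                        (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                         errmsg("could not serialize access due to concurrent update")));
                break;
            default:
                elog(ERROR, "unexpected heap_delete result: %d", result);
        }
    }
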
    2667             : /*
    2668             :  *  heap_delete - delete a tuple
    2669             :  *
    2670             :  * See table_tuple_delete() for an explanation of the parameters, except that
    2671             :  * this routine directly takes a tuple rather than a slot.
    2672             :  *
    2673             :  * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
    2674             :  * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
    2675             :  * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
    2676             :  * generated by another transaction).
    2677             :  */
    2678             : TM_Result
    2679     2754848 : heap_delete(Relation relation, ItemPointer tid,
    2680             :             CommandId cid, Snapshot crosscheck, bool wait,
    2681             :             TM_FailureData *tmfd, bool changingPart)
    2682             : {
    2683             :     TM_Result   result;
    2684     2754848 :     TransactionId xid = GetCurrentTransactionId();
    2685             :     ItemId      lp;
    2686             :     HeapTupleData tp;
    2687             :     Page        page;
    2688             :     BlockNumber block;
    2689             :     Buffer      buffer;
    2690     2754848 :     Buffer      vmbuffer = InvalidBuffer;
    2691             :     TransactionId new_xmax;
    2692             :     uint16      new_infomask,
    2693             :                 new_infomask2;
    2694     2754848 :     bool        have_tuple_lock = false;
    2695             :     bool        iscombo;
    2696     2754848 :     bool        all_visible_cleared = false;
    2697     2754848 :     HeapTuple   old_key_tuple = NULL;   /* replica identity of the tuple */
    2698     2754848 :     bool        old_key_copied = false;
    2699             : 
    2700             :     Assert(ItemPointerIsValid(tid));
    2701             : 
    2702             :     /*
    2703             :      * Forbid this during a parallel operation, lest it allocate a combo CID.
    2704             :      * Other workers might need that combo CID for visibility checks, and we
    2705             :      * have no provision for broadcasting it to them.
    2706             :      */
    2707     2754848 :     if (IsInParallelMode())
    2708           0 :         ereport(ERROR,
    2709             :                 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
    2710             :                  errmsg("cannot delete tuples during a parallel operation")));
    2711             : 
    2712     2754848 :     block = ItemPointerGetBlockNumber(tid);
    2713     2754848 :     buffer = ReadBuffer(relation, block);
    2714     2754848 :     page = BufferGetPage(buffer);
    2715             : 
    2716             :     /*
    2717             :      * Before locking the buffer, pin the visibility map page if it appears to
    2718             :      * be necessary.  Since we haven't got the lock yet, someone else might be
    2719             :      * in the middle of changing this, so we'll need to recheck after we have
    2720             :      * the lock.
    2721             :      */
    2722     2754848 :     if (PageIsAllVisible(page))
    2723         220 :         visibilitymap_pin(relation, block, &vmbuffer);
    2724             : 
    2725     2754848 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    2726             : 
    2727     2754848 :     lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
    2728             :     Assert(ItemIdIsNormal(lp));
    2729             : 
    2730     2754848 :     tp.t_tableOid = RelationGetRelid(relation);
    2731     2754848 :     tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
    2732     2754848 :     tp.t_len = ItemIdGetLength(lp);
    2733     2754848 :     tp.t_self = *tid;
    2734             : 
    2735     2754850 : l1:
    2736             : 
    2737             :     /*
    2738             :      * If we didn't pin the visibility map page and the page has become all
    2739             :      * visible while we were busy locking the buffer, we'll have to unlock and
    2740             :      * re-lock, to avoid holding the buffer lock across an I/O.  That's a bit
    2741             :      * unfortunate, but hopefully shouldn't happen often.
    2742             :      */
    2743     2754850 :     if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
    2744             :     {
    2745           0 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    2746           0 :         visibilitymap_pin(relation, block, &vmbuffer);
    2747           0 :         LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    2748             :     }
    2749             : 
    2750     2754850 :     result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
    2751             : 
    2752     2754850 :     if (result == TM_Invisible)
    2753             :     {
    2754           0 :         UnlockReleaseBuffer(buffer);
    2755           0 :         ereport(ERROR,
    2756             :                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
    2757             :                  errmsg("attempted to delete invisible tuple")));
    2758             :     }
    2759     2754850 :     else if (result == TM_BeingModified && wait)
    2760             :     {
    2761             :         TransactionId xwait;
    2762             :         uint16      infomask;
    2763             : 
    2764             :         /* must copy state data before unlocking buffer */
    2765       81078 :         xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
    2766       81078 :         infomask = tp.t_data->t_infomask;
    2767             : 
    2768             :         /*
    2769             :          * Sleep until concurrent transaction ends -- except when there's a
    2770             :          * single locker and it's our own transaction.  Note we don't care
    2771             :          * which lock mode the locker has, because we need the strongest one.
    2772             :          *
    2773             :          * Before sleeping, we need to acquire tuple lock to establish our
    2774             :          * priority for the tuple (see heap_lock_tuple).  LockTuple will
    2775             :          * release us when we are next-in-line for the tuple.
    2776             :          *
    2777             :          * If we are forced to "start over" below, we keep the tuple lock;
    2778             :          * this arranges that we stay at the head of the line while rechecking
    2779             :          * tuple state.
    2780             :          */
    2781       81078 :         if (infomask & HEAP_XMAX_IS_MULTI)
    2782             :         {
    2783          16 :             bool        current_is_member = false;
    2784             : 
    2785          16 :             if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
    2786             :                                         LockTupleExclusive, &current_is_member))
    2787             :             {
    2788          16 :                 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    2789             : 
    2790             :                 /*
    2791             :                  * Acquire the lock, if necessary (but skip it when we're
    2792             :                  * requesting a lock and already have one; avoids deadlock).
    2793             :                  */
    2794          16 :                 if (!current_is_member)
    2795          12 :                     heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
    2796             :                                          LockWaitBlock, &have_tuple_lock);
    2797             : 
    2798             :                 /* wait for multixact */
    2799          16 :                 MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
    2800             :                                 relation, &(tp.t_self), XLTW_Delete,
    2801             :                                 NULL);
    2802          16 :                 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    2803             : 
    2804             :                 /*
    2805             :                  * If xwait had just locked the tuple then some other xact
    2806             :                  * could update this tuple before we get to this point.  Check
    2807             :                  * for xmax change, and start over if so.
    2808             :                  *
    2809             :                  * We also must start over if we didn't pin the VM page, and
    2810             :                  * the page has become all visible.
    2811             :                  */
    2812          32 :                 if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
    2813          16 :                     xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
    2814          16 :                     !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
    2815             :                                          xwait))
    2816           0 :                     goto l1;
    2817             :             }
    2818             : 
    2819             :             /*
    2820             :              * You might think the multixact is necessarily done here, but not
    2821             :              * so: it could have surviving members, namely our own xact or
    2822             :              * other subxacts of this backend.  It is legal for us to delete
    2823             :              * the tuple in either case, however (the latter case is
    2824             :              * essentially a situation of upgrading our former shared lock to
    2825             :              * exclusive).  We don't bother changing the on-disk hint bits
    2826             :              * since we are about to overwrite the xmax altogether.
    2827             :              */
    2828             :         }
    2829       81062 :         else if (!TransactionIdIsCurrentTransactionId(xwait))
    2830             :         {
    2831             :             /*
    2832             :              * Wait for regular transaction to end; but first, acquire tuple
    2833             :              * lock.
    2834             :              */
    2835          80 :             LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    2836          80 :             heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
    2837             :                                  LockWaitBlock, &have_tuple_lock);
    2838          80 :             XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
    2839          72 :             LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    2840             : 
    2841             :             /*
    2842             :              * xwait is done, but if xwait had just locked the tuple then some
    2843             :              * other xact could update this tuple before we get to this point.
    2844             :              * Check for xmax change, and start over if so.
    2845             :              *
    2846             :              * We also must start over if we didn't pin the VM page, and the
    2847             :              * page has become all visible.
    2848             :              */
    2849         144 :             if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
    2850          72 :                 xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
    2851          70 :                 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
    2852             :                                      xwait))
    2853           2 :                 goto l1;
    2854             : 
    2855             :             /* Otherwise check if it committed or aborted */
    2856          70 :             UpdateXmaxHintBits(tp.t_data, buffer, xwait);
    2857             :         }
    2858             : 
    2859             :         /*
    2860             :          * We may overwrite if previous xmax aborted, or if it committed but
    2861             :          * only locked the tuple without updating it.
    2862             :          */
    2863       81068 :         if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
    2864       81090 :             HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
    2865          50 :             HeapTupleHeaderIsOnlyLocked(tp.t_data))
    2866       81026 :             result = TM_Ok;
    2867          42 :         else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
    2868          34 :             result = TM_Updated;
    2869             :         else
    2870           8 :             result = TM_Deleted;
    2871             :     }
    2872             : 
    2873             :     /* sanity check the result of HeapTupleSatisfiesUpdate() and the logic above */
    2874             :     if (result != TM_Ok)
    2875             :     {
    2876             :         Assert(result == TM_SelfModified ||
    2877             :                result == TM_Updated ||
    2878             :                result == TM_Deleted ||
    2879             :                result == TM_BeingModified);
    2880             :         Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
    2881             :         Assert(result != TM_Updated ||
    2882             :                !ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid));
    2883             :     }
    2884             : 
    2885     2754840 :     if (crosscheck != InvalidSnapshot && result == TM_Ok)
    2886             :     {
    2887             :         /* Perform additional check for transaction-snapshot mode RI updates */
    2888           2 :         if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
    2889           2 :             result = TM_Updated;
    2890             :     }
    2891             : 
    2892     2754840 :     if (result != TM_Ok)
    2893             :     {
    2894         112 :         tmfd->ctid = tp.t_data->t_ctid;
    2895         112 :         tmfd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
    2896         112 :         if (result == TM_SelfModified)
    2897          42 :             tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
    2898             :         else
    2899          70 :             tmfd->cmax = InvalidCommandId;
    2900         112 :         UnlockReleaseBuffer(buffer);
    2901         112 :         if (have_tuple_lock)
    2902          42 :             UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
    2903         112 :         if (vmbuffer != InvalidBuffer)
    2904           0 :             ReleaseBuffer(vmbuffer);
    2905         112 :         return result;
    2906             :     }
    2907             : 
    2908             :     /*
    2909             :      * We're about to do the actual delete -- check for conflict first, to
    2910             :      * avoid possibly having to roll back work we've just done.
    2911             :      *
    2912             :      * This is safe without a recheck as long as there is no possibility of
    2913             :      * another process scanning the page between this check and the delete
    2914             :      * being visible to the scan (i.e., an exclusive buffer content lock is
    2915             :      * continuously held from this point until the tuple delete is visible).
    2916             :      */
    2917     2754728 :     CheckForSerializableConflictIn(relation, tid, BufferGetBlockNumber(buffer));
    2918             : 
    2919             :     /* replace cid with a combo CID if necessary */
    2920     2754700 :     HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
    2921             : 
    2922             :     /*
    2923             :      * Compute replica identity tuple before entering the critical section so
    2924             :      * we don't PANIC upon a memory allocation failure.
    2925             :      */
    2926     2754700 :     old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
    2927             : 
    2928             :     /*
    2929             :      * If this is the first possibly-multixact-able operation in the current
    2930             :      * transaction, set my per-backend OldestMemberMXactId setting. We can be
    2931             :      * certain that the transaction will never become a member of any older
    2932             :      * MultiXactIds than that.  (We have to do this even if we end up just
    2933             :      * using our own TransactionId below, since some other backend could
    2934             :      * incorporate our XID into a MultiXact immediately afterwards.)
    2935             :      */
    2936     2754700 :     MultiXactIdSetOldestMember();
    2937             : 
    2938     2754700 :     compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
    2939     2754700 :                               tp.t_data->t_infomask, tp.t_data->t_infomask2,
    2940             :                               xid, LockTupleExclusive, true,
    2941             :                               &new_xmax, &new_infomask, &new_infomask2);
    2942             : 
    2943     2754700 :     START_CRIT_SECTION();
    2944             : 
    2945             :     /*
    2946             :      * If this transaction commits, the tuple will become DEAD sooner or
    2947             :      * later.  Set flag that this page is a candidate for pruning once our xid
    2948             :      * falls below the OldestXmin horizon.  If the transaction finally aborts,
    2949             :      * the subsequent page pruning will be a no-op and the hint will be
    2950             :      * cleared.
    2951             :      */
    2952     2754700 :     PageSetPrunable(page, xid);
    2953             : 
    2954     2754700 :     if (PageIsAllVisible(page))
    2955             :     {
    2956         220 :         all_visible_cleared = true;
    2957         220 :         PageClearAllVisible(page);
    2958         220 :         visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
    2959             :                             vmbuffer, VISIBILITYMAP_VALID_BITS);
    2960             :     }
    2961             : 
    2962             :     /* store transaction information of xact deleting the tuple */
    2963     2754700 :     tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    2964     2754700 :     tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    2965     2754700 :     tp.t_data->t_infomask |= new_infomask;
    2966     2754700 :     tp.t_data->t_infomask2 |= new_infomask2;
    2967     2754700 :     HeapTupleHeaderClearHotUpdated(tp.t_data);
    2968     2754700 :     HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
    2969     2754700 :     HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
    2970             :     /* Make sure there is no forward chain link in t_ctid */
    2971     2754700 :     tp.t_data->t_ctid = tp.t_self;
    2972             : 
    2973             :     /* Signal that this is actually a move into another partition */
    2974     2754700 :     if (changingPart)
    2975         892 :         HeapTupleHeaderSetMovedPartitions(tp.t_data);
    2976             : 
    2977     2754700 :     MarkBufferDirty(buffer);
    2978             : 
    2979             :     /*
    2980             :      * XLOG stuff
    2981             :      *
    2982             :      * NB: heap_abort_speculative() uses the same xlog record and replay
    2983             :      * routines.
    2984             :      */
    2985     2754700 :     if (RelationNeedsWAL(relation))
    2986             :     {
    2987             :         xl_heap_delete xlrec;
    2988             :         xl_heap_header xlhdr;
    2989             :         XLogRecPtr  recptr;
    2990             : 
    2991             :         /*
    2992             :          * For logical decoding we need combo CIDs to properly decode the
    2993             :          * catalog
    2994             :          */
    2995     2633536 :         if (RelationIsAccessibleInLogicalDecoding(relation))
    2996       10584 :             log_heap_new_cid(relation, &tp);
    2997             : 
    2998     2633536 :         xlrec.flags = 0;
    2999     2633536 :         if (all_visible_cleared)
    3000         220 :             xlrec.flags |= XLH_DELETE_ALL_VISIBLE_CLEARED;
    3001     2633536 :         if (changingPart)
    3002         892 :             xlrec.flags |= XLH_DELETE_IS_PARTITION_MOVE;
    3003     5267072 :         xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
    3004     2633536 :                                               tp.t_data->t_infomask2);
    3005     2633536 :         xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
    3006     2633536 :         xlrec.xmax = new_xmax;
    3007             : 
    3008     2633536 :         if (old_key_tuple != NULL)
    3009             :         {
    3010       94000 :             if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
    3011         242 :                 xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE;
    3012             :             else
    3013       93758 :                 xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY;
    3014             :         }
    3015             : 
    3016     2633536 :         XLogBeginInsert();
    3017     2633536 :         XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
    3018             : 
    3019     2633536 :         XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    3020             : 
    3021             :         /*
    3022             :          * Log replica identity of the deleted tuple if there is one
    3023             :          */
    3024     2633536 :         if (old_key_tuple != NULL)
    3025             :         {
    3026       94000 :             xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
    3027       94000 :             xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
    3028       94000 :             xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
    3029             : 
    3030       94000 :             XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
    3031       94000 :             XLogRegisterData((char *) old_key_tuple->t_data
    3032             :                              + SizeofHeapTupleHeader,
    3033       94000 :                              old_key_tuple->t_len
    3034             :                              - SizeofHeapTupleHeader);
    3035             :         }
    3036             : 
    3037             :         /* filtering by origin on a row level is much more efficient */
    3038     2633536 :         XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    3039             : 
    3040     2633536 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
    3041             : 
    3042     2633536 :         PageSetLSN(page, recptr);
    3043             :     }
    3044             : 
    3045     2754700 :     END_CRIT_SECTION();
    3046             : 
    3047     2754700 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3048             : 
    3049     2754700 :     if (vmbuffer != InvalidBuffer)
    3050         220 :         ReleaseBuffer(vmbuffer);
    3051             : 
    3052             :     /*
    3053             :      * If the tuple has toasted out-of-line attributes, we need to delete
    3054             :      * those items too.  We have to do this before releasing the buffer
    3055             :      * because we need to look at the contents of the tuple, but it's OK to
    3056             :      * release the content lock on the buffer first.
    3057             :      */
    3058     2754700 :     if (relation->rd_rel->relkind != RELKIND_RELATION &&
    3059        3322 :         relation->rd_rel->relkind != RELKIND_MATVIEW)
    3060             :     {
    3061             :         /* toast table entries should never be recursively toasted */
    3062             :         Assert(!HeapTupleHasExternal(&tp));
    3063             :     }
    3064     2751398 :     else if (HeapTupleHasExternal(&tp))
    3065         512 :         heap_toast_delete(relation, &tp, false);
    3066             : 
    3067             :     /*
    3068             :      * Mark tuple for invalidation from system caches at next command
    3069             :      * boundary. We have to do this before releasing the buffer because we
    3070             :      * need to look at the contents of the tuple.
    3071             :      */
    3072     2754700 :     CacheInvalidateHeapTuple(relation, &tp, NULL);
    3073             : 
    3074             :     /* Now we can release the buffer */
    3075     2754700 :     ReleaseBuffer(buffer);
    3076             : 
    3077             :     /*
    3078             :      * Release the lmgr tuple lock, if we had it.
    3079             :      */
    3080     2754700 :     if (have_tuple_lock)
    3081          40 :         UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
    3082             : 
    3083     2754700 :     pgstat_count_heap_delete(relation);
    3084             : 
    3085     2754700 :     if (old_key_tuple != NULL && old_key_copied)
    3086       93760 :         heap_freetuple(old_key_tuple);
    3087             : 
    3088     2754700 :     return TM_Ok;
    3089             : }
    3090             : 
    3091             : /*
    3092             :  *  simple_heap_delete - delete a tuple
    3093             :  *
    3094             :  * This routine may be used to delete a tuple when concurrent updates of
    3095             :  * the target tuple are not expected (for example, because we have a lock
    3096             :  * on the relation associated with the tuple).  Any failure is reported
    3097             :  * via ereport().
    3098             :  */
    3099             : void
    3100     1148272 : simple_heap_delete(Relation relation, ItemPointer tid)
    3101             : {
    3102             :     TM_Result   result;
    3103             :     TM_FailureData tmfd;
    3104             : 
    3105     1148272 :     result = heap_delete(relation, tid,
    3106             :                          GetCurrentCommandId(true), InvalidSnapshot,
    3107             :                          true /* wait for commit */ ,
    3108             :                          &tmfd, false /* changingPart */ );
    3109     1148272 :     switch (result)
    3110             :     {
    3111           0 :         case TM_SelfModified:
    3112             :             /* Tuple was already updated in current command? */
    3113           0 :             elog(ERROR, "tuple already updated by self");
    3114             :             break;
    3115             : 
    3116     1148272 :         case TM_Ok:
    3117             :             /* done successfully */
    3118     1148272 :             break;
    3119             : 
    3120           0 :         case TM_Updated:
    3121           0 :             elog(ERROR, "tuple concurrently updated");
    3122             :             break;
    3123             : 
    3124           0 :         case TM_Deleted:
    3125           0 :             elog(ERROR, "tuple concurrently deleted");
    3126             :             break;
    3127             : 
    3128           0 :         default:
    3129           0 :             elog(ERROR, "unrecognized heap_delete status: %u", result);
    3130             :             break;
    3131             :     }
    3132     1148272 : }
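/*
 * Editorial sketch (not part of heapam.c): a minimal, hypothetical caller of
 * simple_heap_delete(), assuming the relation has already been opened and
 * locked by the caller and the usual heapam/tableam headers are included.
 * The function name and scan key are illustrative only; a concurrent update
 * of a matched tuple makes simple_heap_delete() report an error, per the
 * comment above.
 */
static void
delete_matching_tuples(Relation rel, ScanKeyData *skey)
{
	TableScanDesc scan = table_beginscan_catalog(rel, 1, skey);
	HeapTuple	tuple;

	/* walk the relation and delete every tuple the scan key matches */
	while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
		simple_heap_delete(rel, &tuple->t_self);

	table_endscan(scan);
}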
    3133             : 
    3134             : /*
    3135             :  *  heap_update - replace a tuple
    3136             :  *
    3137             :  * See table_tuple_update() for an explanation of the parameters, except that
    3138             :  * this routine directly takes a tuple rather than a slot.
    3139             :  *
    3140             :  * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
    3141             :  * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
    3142             :  * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
    3143             :  * generated by another transaction).
    3144             :  */
    3145             : TM_Result
    3146      566910 : heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
    3147             :             CommandId cid, Snapshot crosscheck, bool wait,
    3148             :             TM_FailureData *tmfd, LockTupleMode *lockmode,
    3149             :             TU_UpdateIndexes *update_indexes)
    3150             : {
    3151             :     TM_Result   result;
    3152      566910 :     TransactionId xid = GetCurrentTransactionId();
    3153             :     Bitmapset  *hot_attrs;
    3154             :     Bitmapset  *sum_attrs;
    3155             :     Bitmapset  *key_attrs;
    3156             :     Bitmapset  *id_attrs;
    3157             :     Bitmapset  *interesting_attrs;
    3158             :     Bitmapset  *modified_attrs;
    3159             :     ItemId      lp;
    3160             :     HeapTupleData oldtup;
    3161             :     HeapTuple   heaptup;
    3162      566910 :     HeapTuple   old_key_tuple = NULL;
    3163      566910 :     bool        old_key_copied = false;
    3164             :     Page        page;
    3165             :     BlockNumber block;
    3166             :     MultiXactStatus mxact_status;
    3167             :     Buffer      buffer,
    3168             :                 newbuf,
    3169      566910 :                 vmbuffer = InvalidBuffer,
    3170      566910 :                 vmbuffer_new = InvalidBuffer;
    3171             :     bool        need_toast;
    3172             :     Size        newtupsize,
    3173             :                 pagefree;
    3174      566910 :     bool        have_tuple_lock = false;
    3175             :     bool        iscombo;
    3176      566910 :     bool        use_hot_update = false;
    3177      566910 :     bool        summarized_update = false;
    3178             :     bool        key_intact;
    3179      566910 :     bool        all_visible_cleared = false;
    3180      566910 :     bool        all_visible_cleared_new = false;
    3181             :     bool        checked_lockers;
    3182             :     bool        locker_remains;
    3183      566910 :     bool        id_has_external = false;
    3184             :     TransactionId xmax_new_tuple,
    3185             :                 xmax_old_tuple;
    3186             :     uint16      infomask_old_tuple,
    3187             :                 infomask2_old_tuple,
    3188             :                 infomask_new_tuple,
    3189             :                 infomask2_new_tuple;
    3190             : 
    3191             :     Assert(ItemPointerIsValid(otid));
    3192             : 
    3193             :     /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
    3194             :     Assert(HeapTupleHeaderGetNatts(newtup->t_data) <=
    3195             :            RelationGetNumberOfAttributes(relation));
    3196             : 
    3197             :     /*
    3198             :      * Forbid this during a parallel operation, lest it allocate a combo CID.
    3199             :      * Other workers might need that combo CID for visibility checks, and we
    3200             :      * have no provision for broadcasting it to them.
    3201             :      */
    3202      566910 :     if (IsInParallelMode())
    3203           0 :         ereport(ERROR,
    3204             :                 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
    3205             :                  errmsg("cannot update tuples during a parallel operation")));
    3206             : 
    3207             :     /*
    3208             :      * Fetch the list of attributes to be checked for various operations.
    3209             :      *
    3210             :      * For HOT considerations, this is wasted effort if we fail to update or
    3211             :      * have to put the new tuple on a different page.  But we must compute the
    3212             :      * list before obtaining buffer lock --- in the worst case, if we are
    3213             :      * doing an update on one of the relevant system catalogs, we could
    3214             :      * deadlock if we try to fetch the list later.  In any case, the relcache
    3215             :      * caches the data so this is usually pretty cheap.
    3216             :      *
    3217             :      * We also need columns used by the replica identity and columns that are
    3218             :      * considered the "key" of rows in the table.
    3219             :      *
    3220             :      * Note that we get copies of each bitmap, so we need not worry about
    3221             :      * relcache flush happening midway through.
    3222             :      */
    3223      566910 :     hot_attrs = RelationGetIndexAttrBitmap(relation,
    3224             :                                            INDEX_ATTR_BITMAP_HOT_BLOCKING);
    3225      566910 :     sum_attrs = RelationGetIndexAttrBitmap(relation,
    3226             :                                            INDEX_ATTR_BITMAP_SUMMARIZED);
    3227      566910 :     key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY);
    3228      566910 :     id_attrs = RelationGetIndexAttrBitmap(relation,
    3229             :                                           INDEX_ATTR_BITMAP_IDENTITY_KEY);
    3230      566910 :     interesting_attrs = NULL;
    3231      566910 :     interesting_attrs = bms_add_members(interesting_attrs, hot_attrs);
    3232      566910 :     interesting_attrs = bms_add_members(interesting_attrs, sum_attrs);
    3233      566910 :     interesting_attrs = bms_add_members(interesting_attrs, key_attrs);
    3234      566910 :     interesting_attrs = bms_add_members(interesting_attrs, id_attrs);
    3235             : 
    3236      566910 :     block = ItemPointerGetBlockNumber(otid);
    3237      566910 :     buffer = ReadBuffer(relation, block);
    3238      566910 :     page = BufferGetPage(buffer);
    3239             : 
    3240             :     /*
    3241             :      * Before locking the buffer, pin the visibility map page if it appears to
    3242             :      * be necessary.  Since we haven't got the lock yet, someone else might be
    3243             :      * in the middle of changing this, so we'll need to recheck after we have
    3244             :      * the lock.
    3245             :      */
    3246      566910 :     if (PageIsAllVisible(page))
    3247        2246 :         visibilitymap_pin(relation, block, &vmbuffer);
    3248             : 
    3249      566910 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3250             : 
    3251      566910 :     lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
    3252             :     Assert(ItemIdIsNormal(lp));
    3253             : 
    3254             :     /*
    3255             :      * Fill in enough data in oldtup for HeapDetermineColumnsInfo to work
    3256             :      * properly.
    3257             :      */
    3258      566910 :     oldtup.t_tableOid = RelationGetRelid(relation);
    3259      566910 :     oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
    3260      566910 :     oldtup.t_len = ItemIdGetLength(lp);
    3261      566910 :     oldtup.t_self = *otid;
    3262             : 
    3263             :     /* the new tuple is ready, except for this: */
    3264      566910 :     newtup->t_tableOid = RelationGetRelid(relation);
    3265             : 
    3266             :     /*
    3267             :      * Determine columns modified by the update.  Additionally, identify
    3268             :      * whether any of the unmodified replica identity key attributes in the
    3269             :      * old tuple is externally stored or not.  This is required because for
    3270             :      * such attributes the flattened value won't be WAL logged as part of the
    3271             :      * new tuple so we must include it as part of the old_key_tuple.  See
    3272             :      * ExtractReplicaIdentity.
    3273             :      */
    3274      566910 :     modified_attrs = HeapDetermineColumnsInfo(relation, interesting_attrs,
    3275             :                                               id_attrs, &oldtup,
    3276             :                                               newtup, &id_has_external);
    3277             : 
    3278             :     /*
    3279             :      * If we're not updating any "key" column, we can grab a weaker lock type.
    3280             :      * This allows for more concurrency when we are running simultaneously
    3281             :      * with foreign key checks.
    3282             :      *
    3283             :      * Note that if a column gets detoasted while executing the update, but
    3284             :      * the value ends up being the same, this test will fail and we will use
    3285             :      * the stronger lock.  This is acceptable; the important case to optimize
    3286             :      * is updates that don't manipulate key columns, not those that
    3287             :      * serendipitously arrive at the same key values.
    3288             :      */
    3289      566910 :     if (!bms_overlap(modified_attrs, key_attrs))
    3290             :     {
    3291      559344 :         *lockmode = LockTupleNoKeyExclusive;
    3292      559344 :         mxact_status = MultiXactStatusNoKeyUpdate;
    3293      559344 :         key_intact = true;
    3294             : 
    3295             :         /*
    3296             :          * If this is the first possibly-multixact-able operation in the
    3297             :          * current transaction, set my per-backend OldestMemberMXactId
    3298             :          * setting. We can be certain that the transaction will never become a
    3299             :          * member of any older MultiXactIds than that.  (We have to do this
    3300             :          * even if we end up just using our own TransactionId below, since
    3301             :          * some other backend could incorporate our XID into a MultiXact
    3302             :          * immediately afterwards.)
    3303             :          */
    3304      559344 :         MultiXactIdSetOldestMember();
    3305             :     }
    3306             :     else
    3307             :     {
    3308        7566 :         *lockmode = LockTupleExclusive;
    3309        7566 :         mxact_status = MultiXactStatusUpdate;
    3310        7566 :         key_intact = false;
    3311             :     }
    3312             : 
    3313             :     /*
    3314             :      * Note: beyond this point, use oldtup not otid to refer to old tuple.
    3315             :      * otid may very well point at newtup->t_self, which we will overwrite
    3316             :      * with the new tuple's location, so there's great risk of confusion if we
    3317             :      * use otid anymore.
    3318             :      */
    3319             : 
    3320      566910 : l2:
    3321      566912 :     checked_lockers = false;
    3322      566912 :     locker_remains = false;
    3323      566912 :     result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
    3324             : 
    3325             :     /* see below about the "no wait" case */
    3326             :     Assert(result != TM_BeingModified || wait);
    3327             : 
    3328      566912 :     if (result == TM_Invisible)
    3329             :     {
    3330           0 :         UnlockReleaseBuffer(buffer);
    3331           0 :         ereport(ERROR,
    3332             :                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
    3333             :                  errmsg("attempted to update invisible tuple")));
    3334             :     }
    3335      566912 :     else if (result == TM_BeingModified && wait)
    3336             :     {
    3337             :         TransactionId xwait;
    3338             :         uint16      infomask;
    3339       71772 :         bool        can_continue = false;
    3340             : 
    3341             :         /*
    3342             :          * XXX note that we don't consider the "no wait" case here.  This
    3343             :          * isn't a problem currently because no caller uses that case, but it
    3344             :          * should be fixed if such a caller is introduced.  It wasn't a
    3345             :          * problem previously because this code would always wait, but now
    3346             :          * that some tuple locks do not conflict with one of the lock modes we
    3347             :          * use, it is possible that this case is interesting to handle
    3348             :          * specially.
    3349             :          *
    3350             :          * This may cause failures with third-party code that calls
    3351             :          * heap_update directly.
    3352             :          */
    3353             : 
    3354             :         /* must copy state data before unlocking buffer */
    3355       71772 :         xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
    3356       71772 :         infomask = oldtup.t_data->t_infomask;
    3357             : 
    3358             :         /*
    3359             :          * Now we have to do something about the existing locker.  If it's a
    3360             :          * multi, sleep on it; we might be awakened before it is completely
    3361             :          * gone (or even not sleep at all in some cases); we need to preserve
    3362             :          * it as locker, unless it is gone completely.
    3363             :          *
    3364             :          * If it's not a multi, we need to check for sleeping conditions
    3365             :          * before actually going to sleep.  If the update doesn't conflict
    3366             :          * with the locks, we just continue without sleeping (but making sure
    3367             :          * it is preserved).
    3368             :          *
    3369             :          * Before sleeping, we need to acquire tuple lock to establish our
    3370             :          * priority for the tuple (see heap_lock_tuple).  LockTuple will
    3371             :          * release us when we are next-in-line for the tuple.  Note we must
    3372             :          * not acquire the tuple lock until we're sure we're going to sleep;
    3373             :          * otherwise we're open for race conditions with other transactions
    3374             :          * holding the tuple lock which sleep on us.
    3375             :          *
    3376             :          * If we are forced to "start over" below, we keep the tuple lock;
    3377             :          * this arranges that we stay at the head of the line while rechecking
    3378             :          * tuple state.
    3379             :          */
    3380       71772 :         if (infomask & HEAP_XMAX_IS_MULTI)
    3381             :         {
    3382             :             TransactionId update_xact;
    3383             :             int         remain;
    3384         120 :             bool        current_is_member = false;
    3385             : 
    3386         120 :             if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
    3387             :                                         *lockmode, &current_is_member))
    3388             :             {
    3389          16 :                 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3390             : 
    3391             :                 /*
    3392             :                  * Acquire the lock, if necessary (but skip it when we're
    3393             :                  * requesting a lock and already have one; avoids deadlock).
    3394             :                  */
    3395          16 :                 if (!current_is_member)
    3396           0 :                     heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
    3397             :                                          LockWaitBlock, &have_tuple_lock);
    3398             : 
    3399             :                 /* wait for multixact */
    3400          16 :                 MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
    3401             :                                 relation, &oldtup.t_self, XLTW_Update,
    3402             :                                 &remain);
    3403          16 :                 checked_lockers = true;
    3404          16 :                 locker_remains = remain != 0;
    3405          16 :                 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3406             : 
    3407             :                 /*
    3408             :                  * If xwait had just locked the tuple then some other xact
    3409             :                  * could update this tuple before we get to this point.  Check
    3410             :                  * for xmax change, and start over if so.
    3411             :                  */
    3412          16 :                 if (xmax_infomask_changed(oldtup.t_data->t_infomask,
    3413          16 :                                           infomask) ||
    3414          16 :                     !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
    3415             :                                          xwait))
    3416           0 :                     goto l2;
    3417             :             }
    3418             : 
    3419             :             /*
    3420             :              * Note that the multixact may not be done by now.  It could have
    3421             :              * surviving members; our own xact or other subxacts of this
    3422             :              * backend, and also any other concurrent transaction that locked
    3423             :              * the tuple with LockTupleKeyShare if we only got
    3424             :              * LockTupleNoKeyExclusive.  If this is the case, we have to be
    3425             :              * careful to mark the updated tuple with the surviving members in
    3426             :              * Xmax.
    3427             :              *
    3428             :              * Note that there could have been another update in the
    3429             :              * MultiXact. In that case, we need to check whether it committed
    3430             :              * or aborted. If it aborted we are safe to update it again;
    3431             :              * otherwise there is an update conflict, and we have to return
    3432             :              * TM_{Deleted, Updated} below.
    3433             :              *
    3434             :              * In the LockTupleExclusive case, we still need to preserve the
    3435             :              * surviving members: those would include the tuple locks we had
    3436             :              * before this one, which are important to keep in case this
    3437             :              * subxact aborts.
    3438             :              */
    3439         120 :             if (!HEAP_XMAX_IS_LOCKED_ONLY(oldtup.t_data->t_infomask))
    3440          16 :                 update_xact = HeapTupleGetUpdateXid(oldtup.t_data);
    3441             :             else
    3442         104 :                 update_xact = InvalidTransactionId;
    3443             : 
    3444             :             /*
    3445             :              * There was no UPDATE in the MultiXact; or it aborted. No
    3446             :              * TransactionIdIsInProgress() call needed here, since we called
    3447             :              * MultiXactIdWait() above.
    3448             :              */
    3449         136 :             if (!TransactionIdIsValid(update_xact) ||
    3450          16 :                 TransactionIdDidAbort(update_xact))
    3451         106 :                 can_continue = true;
    3452             :         }
    3453       71652 :         else if (TransactionIdIsCurrentTransactionId(xwait))
    3454             :         {
    3455             :             /*
    3456             :              * The only locker is ourselves; we can avoid grabbing the tuple
    3457             :              * lock here, but must preserve our locking information.
    3458             :              */
    3459       71476 :             checked_lockers = true;
    3460       71476 :             locker_remains = true;
    3461       71476 :             can_continue = true;
    3462             :         }
    3463         176 :         else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
    3464             :         {
    3465             :             /*
    3466             :              * If it's just a key-share locker, and we're not changing the key
    3467             :              * columns, we don't need to wait for it to end; but we need to
    3468             :              * preserve it as locker.
    3469             :              */
    3470          58 :             checked_lockers = true;
    3471          58 :             locker_remains = true;
    3472          58 :             can_continue = true;
    3473             :         }
    3474             :         else
    3475             :         {
    3476             :             /*
    3477             :              * Wait for regular transaction to end; but first, acquire tuple
    3478             :              * lock.
    3479             :              */
    3480         118 :             LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3481         118 :             heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
    3482             :                                  LockWaitBlock, &have_tuple_lock);
    3483         118 :             XactLockTableWait(xwait, relation, &oldtup.t_self,
    3484             :                               XLTW_Update);
    3485         118 :             checked_lockers = true;
    3486         118 :             LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3487             : 
    3488             :             /*
    3489             :              * xwait is done, but if xwait had just locked the tuple then some
    3490             :              * other xact could update this tuple before we get to this point.
    3491             :              * Check for xmax change, and start over if so.
    3492             :              */
    3493         118 :             if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
    3494         116 :                 !TransactionIdEquals(xwait,
    3495             :                                      HeapTupleHeaderGetRawXmax(oldtup.t_data)))
    3496           2 :                 goto l2;
    3497             : 
    3498             :             /* Otherwise check if it committed or aborted */
    3499         116 :             UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
    3500         116 :             if (oldtup.t_data->t_infomask & HEAP_XMAX_INVALID)
    3501          24 :                 can_continue = true;
    3502             :         }
    3503             : 
    3504       71770 :         if (can_continue)
    3505       71664 :             result = TM_Ok;
    3506         106 :         else if (!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid))
    3507          96 :             result = TM_Updated;
    3508             :         else
    3509          10 :             result = TM_Deleted;
    3510             :     }
    3511             : 
    3512             :     /* Sanity check the result of HeapTupleSatisfiesUpdate() and the logic above */
    3513             :     if (result != TM_Ok)
    3514             :     {
    3515             :         Assert(result == TM_SelfModified ||
    3516             :                result == TM_Updated ||
    3517             :                result == TM_Deleted ||
    3518             :                result == TM_BeingModified);
    3519             :         Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
    3520             :         Assert(result != TM_Updated ||
    3521             :                !ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
    3522             :     }
    3523             : 
    3524      566910 :     if (crosscheck != InvalidSnapshot && result == TM_Ok)
    3525             :     {
    3526             :         /* Perform additional check for transaction-snapshot mode RI updates */
    3527           2 :         if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
    3528           2 :             result = TM_Updated;
    3529             :     }
    3530             : 
    3531      566910 :     if (result != TM_Ok)
    3532             :     {
    3533         302 :         tmfd->ctid = oldtup.t_data->t_ctid;
    3534         302 :         tmfd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
    3535         302 :         if (result == TM_SelfModified)
    3536         104 :             tmfd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
    3537             :         else
    3538         198 :             tmfd->cmax = InvalidCommandId;
    3539         302 :         UnlockReleaseBuffer(buffer);
    3540         302 :         if (have_tuple_lock)
    3541          92 :             UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
    3542         302 :         if (vmbuffer != InvalidBuffer)
    3543           0 :             ReleaseBuffer(vmbuffer);
    3544         302 :         *update_indexes = TU_None;
    3545             : 
    3546         302 :         bms_free(hot_attrs);
    3547         302 :         bms_free(sum_attrs);
    3548         302 :         bms_free(key_attrs);
    3549         302 :         bms_free(id_attrs);
    3550         302 :         bms_free(modified_attrs);
    3551         302 :         bms_free(interesting_attrs);
    3552         302 :         return result;
    3553             :     }
    3554             : 
    3555             :     /*
    3556             :      * If we didn't pin the visibility map page and the page has become all
    3557             :      * visible while we were busy locking the buffer, or during some
    3558             :      * subsequent window during which we had it unlocked, we'll have to unlock
    3559             :      * and re-lock, to avoid holding the buffer lock across an I/O.  That's a
    3560             :      * bit unfortunate, especially since we'll now have to recheck whether the
    3561             :      * tuple has been locked or updated under us, but hopefully it won't
    3562             :      * happen very often.
    3563             :      */
    3564      566608 :     if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
    3565             :     {
    3566           0 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3567           0 :         visibilitymap_pin(relation, block, &vmbuffer);
    3568           0 :         LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3569           0 :         goto l2;
    3570             :     }
    3571             : 
    3572             :     /* Fill in transaction status data */
    3573             : 
    3574             :     /*
    3575             :      * If the tuple we're updating is locked, we need to preserve the locking
    3576             :      * info in the old tuple's Xmax.  Prepare a new Xmax value for this.
    3577             :      */
    3578      566608 :     compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
    3579      566608 :                               oldtup.t_data->t_infomask,
    3580      566608 :                               oldtup.t_data->t_infomask2,
    3581             :                               xid, *lockmode, true,
    3582             :                               &xmax_old_tuple, &infomask_old_tuple,
    3583             :                               &infomask2_old_tuple);
    3584             : 
    3585             :     /*
    3586             :      * And also prepare an Xmax value for the new copy of the tuple.  If there
    3587             :      * was no xmax previously, or there was one but all lockers are now gone,
    3588             :      * then use InvalidTransactionId; otherwise, get the xmax from the old
    3589             :      * tuple.  (In rare cases that might also be InvalidTransactionId and yet
    3590             :      * not have the HEAP_XMAX_INVALID bit set; that's fine.)
    3591             :      */
    3592      566608 :     if ((oldtup.t_data->t_infomask & HEAP_XMAX_INVALID) ||
    3593       71640 :         HEAP_LOCKED_UPGRADED(oldtup.t_data->t_infomask) ||
    3594       71536 :         (checked_lockers && !locker_remains))
    3595      494968 :         xmax_new_tuple = InvalidTransactionId;
    3596             :     else
    3597       71640 :         xmax_new_tuple = HeapTupleHeaderGetRawXmax(oldtup.t_data);
    3598             : 
    3599      566608 :     if (!TransactionIdIsValid(xmax_new_tuple))
    3600             :     {
    3601      494968 :         infomask_new_tuple = HEAP_XMAX_INVALID;
    3602      494968 :         infomask2_new_tuple = 0;
    3603             :     }
    3604             :     else
    3605             :     {
    3606             :         /*
    3607             :          * If we found a valid Xmax for the new tuple, then the infomask bits
    3608             :          * to use on the new tuple depend on what was there on the old one.
    3609             :          * Note that since we're doing an update, the only possibility is that
    3610             :          * the lockers had FOR KEY SHARE lock.
    3611             :          */
    3612       71640 :         if (oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI)
    3613             :         {
    3614         106 :             GetMultiXactIdHintBits(xmax_new_tuple, &infomask_new_tuple,
    3615             :                                    &infomask2_new_tuple);
    3616             :         }
    3617             :         else
    3618             :         {
    3619       71534 :             infomask_new_tuple = HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_LOCK_ONLY;
    3620       71534 :             infomask2_new_tuple = 0;
    3621             :         }
    3622             :     }
    3623             : 
    3624             :     /*
    3625             :      * Prepare the new tuple with the appropriate initial values of Xmin and
    3626             :      * Xmax, as well as initial infomask bits as computed above.
    3627             :      */
    3628      566608 :     newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
    3629      566608 :     newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
    3630      566608 :     HeapTupleHeaderSetXmin(newtup->t_data, xid);
    3631      566608 :     HeapTupleHeaderSetCmin(newtup->t_data, cid);
    3632      566608 :     newtup->t_data->t_infomask |= HEAP_UPDATED | infomask_new_tuple;
    3633      566608 :     newtup->t_data->t_infomask2 |= infomask2_new_tuple;
    3634      566608 :     HeapTupleHeaderSetXmax(newtup->t_data, xmax_new_tuple);
    3635             : 
    3636             :     /*
    3637             :      * Replace cid with a combo CID if necessary.  Note that we already put
    3638             :      * the plain cid into the new tuple.
    3639             :      */
    3640      566608 :     HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);
    3641             : 
    3642             :     /*
    3643             :      * If the toaster needs to be activated, OR if the new tuple will not fit
    3644             :      * on the same page as the old, then we need to release the content lock
    3645             :      * (but not the pin!) on the old tuple's buffer while we are off doing
    3646             :      * TOAST and/or table-file-extension work.  We must mark the old tuple to
    3647             :      * show that it's locked, else other processes may try to update it
    3648             :      * themselves.
    3649             :      *
    3650             :      * We need to invoke the toaster if there are already any out-of-line
    3651             :      * toasted values present, or if the new tuple is over-threshold.
    3652             :      */
    3653      566608 :     if (relation->rd_rel->relkind != RELKIND_RELATION &&
    3654           0 :         relation->rd_rel->relkind != RELKIND_MATVIEW)
    3655             :     {
    3656             :         /* toast table entries should never be recursively toasted */
    3657             :         Assert(!HeapTupleHasExternal(&oldtup));
    3658             :         Assert(!HeapTupleHasExternal(newtup));
    3659           0 :         need_toast = false;
    3660             :     }
    3661             :     else
    3662      566608 :         need_toast = (HeapTupleHasExternal(&oldtup) ||
    3663     1132696 :                       HeapTupleHasExternal(newtup) ||
    3664      566088 :                       newtup->t_len > TOAST_TUPLE_THRESHOLD);
    3665             : 
    3666      566608 :     pagefree = PageGetHeapFreeSpace(page);
    3667             : 
    3668      566608 :     newtupsize = MAXALIGN(newtup->t_len);
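	/*
	 * Editorial note, not in the original file: MAXALIGN rounds the length up
	 * to the platform's maximum alignment.  Assuming the common
	 * MAXIMUM_ALIGNOF of 8, a tuple with t_len = 45 yields MAXALIGN(45) == 48,
	 * and that 48-byte figure is what gets compared against pagefree below.
	 */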
    3669             : 
    3670      566608 :     if (need_toast || newtupsize > pagefree)
    3671      283138 :     {
    3672             :         TransactionId xmax_lock_old_tuple;
    3673             :         uint16      infomask_lock_old_tuple,
    3674             :                     infomask2_lock_old_tuple;
    3675      283138 :         bool        cleared_all_frozen = false;
    3676             : 
    3677             :         /*
    3678             :          * To prevent concurrent sessions from updating the tuple, we have to
    3679             :          * temporarily mark it locked, while we release the page-level lock.
    3680             :          *
    3681             :          * To satisfy the rule that any xid potentially appearing in a buffer
    3682             :          * written out to disk must be covered by a WAL record, we
    3683             :          * unfortunately have to WAL-log this temporary modification.  We can
    3684             :          * reuse xl_heap_lock for this purpose.  If we crash/error before
    3685             :          * following through with the actual update, xmax will be that of an
    3686             :          * aborted transaction, allowing other sessions to proceed.
    3687             :          */
    3688             : 
    3689             :         /*
    3690             :          * Compute xmax / infomask appropriate for locking the tuple. This has
    3691             :          * to be done separately from the combo that's going to be used for
    3692             :          * updating, because the potentially created multixact would otherwise
    3693             :          * be wrong.
    3694             :          */
    3695      283138 :         compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
    3696      283138 :                                   oldtup.t_data->t_infomask,
    3697      283138 :                                   oldtup.t_data->t_infomask2,
    3698             :                                   xid, *lockmode, false,
    3699             :                                   &xmax_lock_old_tuple, &infomask_lock_old_tuple,
    3700             :                                   &infomask2_lock_old_tuple);
    3701             : 
    3702             :         Assert(HEAP_XMAX_IS_LOCKED_ONLY(infomask_lock_old_tuple));
    3703             : 
    3704      283138 :         START_CRIT_SECTION();
    3705             : 
    3706             :         /* Clear obsolete visibility flags ... */
    3707      283138 :         oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    3708      283138 :         oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    3709      283138 :         HeapTupleClearHotUpdated(&oldtup);
    3710             :         /* ... and store info about transaction updating this tuple */
    3711             :         Assert(TransactionIdIsValid(xmax_lock_old_tuple));
    3712      283138 :         HeapTupleHeaderSetXmax(oldtup.t_data, xmax_lock_old_tuple);
    3713      283138 :         oldtup.t_data->t_infomask |= infomask_lock_old_tuple;
    3714      283138 :         oldtup.t_data->t_infomask2 |= infomask2_lock_old_tuple;
    3715      283138 :         HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
    3716             : 
    3717             :         /* temporarily make it look not-updated, but locked */
    3718      283138 :         oldtup.t_data->t_ctid = oldtup.t_self;
    3719             : 
    3720             :         /*
    3721             :          * Clear all-frozen bit on visibility map if needed. We could
    3722             :          * immediately reset ALL_VISIBLE, but given that the WAL logging
    3723             :          * overhead would be unchanged, that doesn't seem necessarily
    3724             :          * worthwhile.
    3725             :          */
    3726      284288 :         if (PageIsAllVisible(page) &&
    3727        1150 :             visibilitymap_clear(relation, block, vmbuffer,
    3728             :                                 VISIBILITYMAP_ALL_FROZEN))
    3729         856 :             cleared_all_frozen = true;
    3730             : 
    3731      283138 :         MarkBufferDirty(buffer);
    3732             : 
    3733      283138 :         if (RelationNeedsWAL(relation))
    3734             :         {
    3735             :             xl_heap_lock xlrec;
    3736             :             XLogRecPtr  recptr;
    3737             : 
    3738      262874 :             XLogBeginInsert();
    3739      262874 :             XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    3740             : 
    3741      262874 :             xlrec.offnum = ItemPointerGetOffsetNumber(&oldtup.t_self);
    3742      262874 :             xlrec.xmax = xmax_lock_old_tuple;
    3743      525748 :             xlrec.infobits_set = compute_infobits(oldtup.t_data->t_infomask,
    3744      262874 :                                                   oldtup.t_data->t_infomask2);
    3745      262874 :             xlrec.flags =
    3746      262874 :                 cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
    3747      262874 :             XLogRegisterData((char *) &xlrec, SizeOfHeapLock);
    3748      262874 :             recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
    3749      262874 :             PageSetLSN(page, recptr);
    3750             :         }
    3751             : 
    3752      283138 :         END_CRIT_SECTION();
    3753             : 
    3754      283138 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3755             : 
    3756             :         /*
    3757             :          * Let the toaster do its thing, if needed.
    3758             :          *
    3759             :          * Note: below this point, heaptup is the data we actually intend to
    3760             :          * store into the relation; newtup is the caller's original untoasted
    3761             :          * data.
    3762             :          */
    3763      283138 :         if (need_toast)
    3764             :         {
    3765             :             /* Note we always use WAL and FSM during updates */
    3766        1960 :             heaptup = heap_toast_insert_or_update(relation, newtup, &oldtup, 0);
    3767        1960 :             newtupsize = MAXALIGN(heaptup->t_len);
    3768             :         }
    3769             :         else
    3770      281178 :             heaptup = newtup;
    3771             : 
    3772             :         /*
    3773             :          * Now, do we need a new page for the tuple, or not?  This is a bit
    3774             :          * tricky since someone else could have added tuples to the page while
    3775             :          * we weren't looking.  We have to recheck the available space after
    3776             :          * reacquiring the buffer lock.  But don't bother to do that if the
    3777             :          * former amount of free space is still not enough; it's unlikely
    3778             :          * there's more free now than before.
    3779             :          *
    3780             :          * What's more, if we need to get a new page, we will need to acquire
    3781             :          * buffer locks on both old and new pages.  To avoid deadlock against
    3782             :          * some other backend trying to get the same two locks in the other
    3783             :          * order, we must be consistent about the order we get the locks in.
    3784             :          * We use the rule "lock the lower-numbered page of the relation
    3785             :          * first".  To implement this, we must do RelationGetBufferForTuple
    3786             :          * while not holding the lock on the old page, and we must rely on it
    3787             :          * to get the locks on both pages in the correct order.
    3788             :          *
    3789             :          * Another consideration is that we need visibility map page pin(s) if
    3790             :          * we will have to clear the all-visible flag on either page.  If we
    3791             :          * call RelationGetBufferForTuple, we rely on it to acquire any such
    3792             :          * pins; but if we don't, we have to handle that here.  Hence we need
    3793             :          * a loop.
    3794             :          */
    3795             :         for (;;)
    3796             :         {
    3797      283138 :             if (newtupsize > pagefree)
    3798             :             {
    3799             :                 /* It doesn't fit, must use RelationGetBufferForTuple. */
    3800      282504 :                 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
    3801             :                                                    buffer, 0, NULL,
    3802             :                                                    &vmbuffer_new, &vmbuffer,
    3803             :                                                    0);
    3804             :                 /* We're all done. */
    3805      282504 :                 break;
    3806             :             }
    3807             :             /* Acquire VM page pin if needed and we don't have it. */
    3808         634 :             if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
    3809           0 :                 visibilitymap_pin(relation, block, &vmbuffer);
    3810             :             /* Re-acquire the lock on the old tuple's page. */
    3811         634 :             LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3812             :             /* Re-check using the up-to-date free space */
    3813         634 :             pagefree = PageGetHeapFreeSpace(page);
    3814         634 :             if (newtupsize > pagefree ||
    3815         634 :                 (vmbuffer == InvalidBuffer && PageIsAllVisible(page)))
    3816             :             {
    3817             :                 /*
    3818             :                  * Rats, it doesn't fit anymore, or somebody just now set the
    3819             :                  * all-visible flag.  We must now unlock and loop to avoid
    3820             :                  * deadlock.  Fortunately, this path should seldom be taken.
    3821             :                  */
    3822           0 :                 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3823             :             }
    3824             :             else
    3825             :             {
    3826             :                 /* We're all done. */
    3827         634 :                 newbuf = buffer;
    3828         634 :                 break;
    3829             :             }
    3830             :         }
    3831             :     }
    3832             :     else
    3833             :     {
    3834             :         /* No TOAST work needed, and it'll fit on same page */
    3835      283470 :         newbuf = buffer;
    3836      283470 :         heaptup = newtup;
    3837             :     }
    3838             : 
    3839             :     /*
    3840             :      * We're about to do the actual update -- check for conflict first, to
    3841             :      * avoid possibly having to roll back work we've just done.
    3842             :      *
    3843             :      * This is safe without a recheck as long as there is no possibility of
    3844             :      * another process scanning the pages between this check and the update
    3845             :      * being visible to the scan (i.e., exclusive buffer content lock(s) are
    3846             :      * continuously held from this point until the tuple update is visible).
    3847             :      *
    3848             :      * For the new tuple the only check needed is at the relation level, but
    3849             :      * since both tuples are in the same relation and the check for oldtup
    3850             :      * will include checking the relation level, there is no benefit to a
    3851             :      * separate check for the new tuple.
    3852             :      */
    3853      566608 :     CheckForSerializableConflictIn(relation, &oldtup.t_self,
    3854             :                                    BufferGetBlockNumber(buffer));
    3855             : 
    3856             :     /*
    3857             :      * At this point newbuf and buffer are both pinned and locked, and newbuf
    3858             :      * has enough space for the new tuple.  If they are the same buffer, only
    3859             :      * one pin is held.
    3860             :      */
    3861             : 
    3862      566584 :     if (newbuf == buffer)
    3863             :     {
    3864             :         /*
    3865             :          * Since the new tuple is going into the same page, we might be able
    3866             :          * to do a HOT update.  Check if any of the index columns have been
    3867             :          * changed.
    3868             :          */
    3869      284080 :         if (!bms_overlap(modified_attrs, hot_attrs))
    3870             :         {
    3871      262872 :             use_hot_update = true;
    3872             : 
    3873             :             /*
    3874             :              * If none of the columns that are used in hot-blocking indexes
    3875             :              * were updated, we can apply HOT, but we do still need to check
    3876             :              * if we need to update the summarizing indexes, and update those
    3877             :              * indexes if the columns were updated, or we may fail to detect
    3878             :              * e.g. value bound changes in BRIN minmax indexes.
    3879             :              */
    3880      262872 :             if (bms_overlap(modified_attrs, sum_attrs))
    3881        3282 :                 summarized_update = true;
    3882             :         }
    3883             :     }
    3884             :     else
    3885             :     {
    3886             :         /* Set a hint that the old page could use prune/defrag */
    3887      282504 :         PageSetFull(page);
    3888             :     }
    3889             : 
    3890             :     /*
    3891             :      * Compute replica identity tuple before entering the critical section so
    3892             :      * we don't PANIC upon a memory allocation failure.
    3893             :      * ExtractReplicaIdentity() will return NULL if nothing needs to be
    3894             :      * logged.  Pass old key required as true only if the replica identity key
     3895             :      * columns are modified or the old tuple has externally stored data.
    3896             :      */
    3897      566584 :     old_key_tuple = ExtractReplicaIdentity(relation, &oldtup,
    3898      566584 :                                            bms_overlap(modified_attrs, id_attrs) ||
    3899             :                                            id_has_external,
    3900             :                                            &old_key_copied);
    3901             : 
    3902             :     /* NO EREPORT(ERROR) from here till changes are logged */
    3903      566584 :     START_CRIT_SECTION();
    3904             : 
    3905             :     /*
    3906             :      * If this transaction commits, the old tuple will become DEAD sooner or
    3907             :      * later.  Set flag that this page is a candidate for pruning once our xid
    3908             :      * falls below the OldestXmin horizon.  If the transaction finally aborts,
    3909             :      * the subsequent page pruning will be a no-op and the hint will be
    3910             :      * cleared.
    3911             :      *
    3912             :      * XXX Should we set hint on newbuf as well?  If the transaction aborts,
    3913             :      * there would be a prunable tuple in the newbuf; but for now we choose
    3914             :      * not to optimize for aborts.  Note that heap_xlog_update must be kept in
    3915             :      * sync if this decision changes.
    3916             :      */
    3917      566584 :     PageSetPrunable(page, xid);
    3918             : 
    3919      566584 :     if (use_hot_update)
    3920             :     {
    3921             :         /* Mark the old tuple as HOT-updated */
    3922      262872 :         HeapTupleSetHotUpdated(&oldtup);
    3923             :         /* And mark the new tuple as heap-only */
    3924      262872 :         HeapTupleSetHeapOnly(heaptup);
    3925             :         /* Mark the caller's copy too, in case different from heaptup */
    3926      262872 :         HeapTupleSetHeapOnly(newtup);
    3927             :     }
    3928             :     else
    3929             :     {
    3930             :         /* Make sure tuples are correctly marked as not-HOT */
    3931      303712 :         HeapTupleClearHotUpdated(&oldtup);
    3932      303712 :         HeapTupleClearHeapOnly(heaptup);
    3933      303712 :         HeapTupleClearHeapOnly(newtup);
    3934             :     }
    3935             : 
    3936      566584 :     RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
    3937             : 
    3938             : 
    3939             :     /* Clear obsolete visibility flags, possibly set by ourselves above... */
    3940      566584 :     oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    3941      566584 :     oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    3942             :     /* ... and store info about transaction updating this tuple */
    3943             :     Assert(TransactionIdIsValid(xmax_old_tuple));
    3944      566584 :     HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
    3945      566584 :     oldtup.t_data->t_infomask |= infomask_old_tuple;
    3946      566584 :     oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
    3947      566584 :     HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
    3948             : 
    3949             :     /* record address of new tuple in t_ctid of old one */
    3950      566584 :     oldtup.t_data->t_ctid = heaptup->t_self;
    3951             : 
    3952             :     /* clear PD_ALL_VISIBLE flags, reset all visibilitymap bits */
    3953      566584 :     if (PageIsAllVisible(BufferGetPage(buffer)))
    3954             :     {
    3955        2246 :         all_visible_cleared = true;
    3956        2246 :         PageClearAllVisible(BufferGetPage(buffer));
    3957        2246 :         visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
    3958             :                             vmbuffer, VISIBILITYMAP_VALID_BITS);
    3959             :     }
    3960      566584 :     if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
    3961             :     {
    3962         962 :         all_visible_cleared_new = true;
    3963         962 :         PageClearAllVisible(BufferGetPage(newbuf));
    3964         962 :         visibilitymap_clear(relation, BufferGetBlockNumber(newbuf),
    3965             :                             vmbuffer_new, VISIBILITYMAP_VALID_BITS);
    3966             :     }
    3967             : 
    3968      566584 :     if (newbuf != buffer)
    3969      282504 :         MarkBufferDirty(newbuf);
    3970      566584 :     MarkBufferDirty(buffer);
    3971             : 
    3972             :     /* XLOG stuff */
    3973      566584 :     if (RelationNeedsWAL(relation))
    3974             :     {
    3975             :         XLogRecPtr  recptr;
    3976             : 
    3977             :         /*
    3978             :          * For logical decoding we need combo CIDs to properly decode the
    3979             :          * catalog.
    3980             :          */
    3981      543936 :         if (RelationIsAccessibleInLogicalDecoding(relation))
    3982             :         {
    3983        5266 :             log_heap_new_cid(relation, &oldtup);
    3984        5266 :             log_heap_new_cid(relation, heaptup);
    3985             :         }
    3986             : 
    3987      543936 :         recptr = log_heap_update(relation, buffer,
    3988             :                                  newbuf, &oldtup, heaptup,
    3989             :                                  old_key_tuple,
    3990             :                                  all_visible_cleared,
    3991             :                                  all_visible_cleared_new);
    3992      543936 :         if (newbuf != buffer)
    3993             :         {
    3994      262252 :             PageSetLSN(BufferGetPage(newbuf), recptr);
    3995             :         }
    3996      543936 :         PageSetLSN(BufferGetPage(buffer), recptr);
    3997             :     }
    3998             : 
    3999      566584 :     END_CRIT_SECTION();
    4000             : 
    4001      566584 :     if (newbuf != buffer)
    4002      282504 :         LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
    4003      566584 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    4004             : 
    4005             :     /*
    4006             :      * Mark old tuple for invalidation from system caches at next command
    4007             :      * boundary, and mark the new tuple for invalidation in case we abort. We
    4008             :      * have to do this before releasing the buffer because oldtup is in the
    4009             :      * buffer.  (heaptup is all in local memory, but it's necessary to process
    4010             :      * both tuple versions in one call to inval.c so we can avoid redundant
    4011             :      * sinval messages.)
    4012             :      */
    4013      566584 :     CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
    4014             : 
    4015             :     /* Now we can release the buffer(s) */
    4016      566584 :     if (newbuf != buffer)
    4017      282504 :         ReleaseBuffer(newbuf);
    4018      566584 :     ReleaseBuffer(buffer);
    4019      566584 :     if (BufferIsValid(vmbuffer_new))
    4020         962 :         ReleaseBuffer(vmbuffer_new);
    4021      566584 :     if (BufferIsValid(vmbuffer))
    4022        2246 :         ReleaseBuffer(vmbuffer);
    4023             : 
    4024             :     /*
    4025             :      * Release the lmgr tuple lock, if we had it.
    4026             :      */
    4027      566584 :     if (have_tuple_lock)
    4028          24 :         UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
    4029             : 
    4030      566584 :     pgstat_count_heap_update(relation, use_hot_update, newbuf != buffer);
    4031             : 
    4032             :     /*
    4033             :      * If heaptup is a private copy, release it.  Don't forget to copy t_self
    4034             :      * back to the caller's image, too.
    4035             :      */
    4036      566584 :     if (heaptup != newtup)
    4037             :     {
    4038        1866 :         newtup->t_self = heaptup->t_self;
    4039        1866 :         heap_freetuple(heaptup);
    4040             :     }
    4041             : 
    4042             :     /*
    4043             :      * If it is a HOT update, the update may still need to update summarized
    4044             :      * indexes, lest we fail to update those summaries and get incorrect
    4045             :      * results (for example, minmax bounds of the block may change with this
    4046             :      * update).
    4047             :      */
    4048      566584 :     if (use_hot_update)
    4049             :     {
    4050      262872 :         if (summarized_update)
    4051        3282 :             *update_indexes = TU_Summarizing;
    4052             :         else
    4053      259590 :             *update_indexes = TU_None;
    4054             :     }
    4055             :     else
    4056      303712 :         *update_indexes = TU_All;
    4057             : 
    4058      566584 :     if (old_key_tuple != NULL && old_key_copied)
    4059         160 :         heap_freetuple(old_key_tuple);
    4060             : 
    4061      566584 :     bms_free(hot_attrs);
    4062      566584 :     bms_free(sum_attrs);
    4063      566584 :     bms_free(key_attrs);
    4064      566584 :     bms_free(id_attrs);
    4065      566584 :     bms_free(modified_attrs);
    4066      566584 :     bms_free(interesting_attrs);
    4067             : 
    4068      566584 :     return TM_Ok;
    4069             : }
    4070             : 
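/*
 * [Editorial note -- illustrative sketch, not part of heapam.c]
 * A caller of heap_update() (typically reached through the table AM's
 * tuple_update callback) is expected to act on the TU_UpdateIndexes value
 * filled in above.  With hypothetical variable names, the pattern is
 * roughly:
 *
 *     TU_UpdateIndexes update_indexes;
 *     TM_FailureData   tmfd;
 *     LockTupleMode    lockmode;
 *
 *     if (heap_update(rel, otid, newtup, GetCurrentCommandId(true),
 *                     InvalidSnapshot, true, &tmfd, &lockmode,
 *                     &update_indexes) == TM_Ok)
 *     {
 *         if (update_indexes == TU_All)
 *             ... insert entries into all indexes ...
 *         else if (update_indexes == TU_Summarizing)
 *             ... insert entries only into summarizing (e.g. BRIN) indexes ...
 *         ... TU_None (plain HOT update) needs no index work ...
 *     }
 */
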
    4071             : /*
    4072             :  * Check if the specified attribute's values are the same.  Subroutine for
    4073             :  * HeapDetermineColumnsInfo.
    4074             :  */
    4075             : static bool
    4076     1278964 : heap_attr_equals(TupleDesc tupdesc, int attrnum, Datum value1, Datum value2,
    4077             :                  bool isnull1, bool isnull2)
    4078             : {
    4079             :     Form_pg_attribute att;
    4080             : 
    4081             :     /*
     4082             :      * If one value is NULL and the other is not, then they are certainly not
    4083             :      * equal
    4084             :      */
    4085     1278964 :     if (isnull1 != isnull2)
    4086           6 :         return false;
    4087             : 
    4088             :     /*
    4089             :      * If both are NULL, they can be considered equal.
    4090             :      */
    4091     1278958 :     if (isnull1)
    4092        9982 :         return true;
    4093             : 
    4094             :     /*
    4095             :      * We do simple binary comparison of the two datums.  This may be overly
    4096             :      * strict because there can be multiple binary representations for the
    4097             :      * same logical value.  But we should be OK as long as there are no false
    4098             :      * positives.  Using a type-specific equality operator is messy because
    4099             :      * there could be multiple notions of equality in different operator
    4100             :      * classes; furthermore, we cannot safely invoke user-defined functions
    4101             :      * while holding exclusive buffer lock.
    4102             :      */
    4103     1268976 :     if (attrnum <= 0)
    4104             :     {
    4105             :         /* The only allowed system columns are OIDs, so do this */
    4106           0 :         return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
    4107             :     }
    4108             :     else
    4109             :     {
    4110             :         Assert(attrnum <= tupdesc->natts);
    4111     1268976 :         att = TupleDescAttr(tupdesc, attrnum - 1);
    4112     1268976 :         return datumIsEqual(value1, value2, att->attbyval, att->attlen);
    4113             :     }
    4114             : }
    4115             : 
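/*
 * [Editorial note -- illustrative, not part of heapam.c]
 * Because the comparison above is purely binary (datumIsEqual), it can
 * report "not equal" for values an equality operator would call equal.
 * For instance, numeric 1.0 and 1.00 carry different display scales and
 * so differ byte-for-byte, so an UPDATE replacing one with the other is
 * treated as modifying the column.  That direction of error only costs
 * an optimization such as HOT; a false "equal" would be the unsafe
 * direction, which is the trade-off the comment above alludes to.
 */
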
    4116             : /*
    4117             :  * Check which columns are being updated.
    4118             :  *
    4119             :  * Given an updated tuple, determine (and return into the output bitmapset),
    4120             :  * from those listed as interesting, the set of columns that changed.
    4121             :  *
    4122             :  * has_external indicates if any of the unmodified attributes (from those
    4123             :  * listed as interesting) of the old tuple is a member of external_cols and is
    4124             :  * stored externally.
    4125             :  */
    4126             : static Bitmapset *
    4127      566910 : HeapDetermineColumnsInfo(Relation relation,
    4128             :                          Bitmapset *interesting_cols,
    4129             :                          Bitmapset *external_cols,
    4130             :                          HeapTuple oldtup, HeapTuple newtup,
    4131             :                          bool *has_external)
    4132             : {
    4133             :     int         attidx;
    4134      566910 :     Bitmapset  *modified = NULL;
    4135      566910 :     TupleDesc   tupdesc = RelationGetDescr(relation);
    4136             : 
    4137      566910 :     attidx = -1;
    4138     1845874 :     while ((attidx = bms_next_member(interesting_cols, attidx)) >= 0)
    4139             :     {
    4140             :         /* attidx is zero-based, attrnum is the normal attribute number */
    4141     1278964 :         AttrNumber  attrnum = attidx + FirstLowInvalidHeapAttributeNumber;
    4142             :         Datum       value1,
    4143             :                     value2;
    4144             :         bool        isnull1,
    4145             :                     isnull2;
    4146             : 
    4147             :         /*
    4148             :          * If it's a whole-tuple reference, say "not equal".  It's not really
    4149             :          * worth supporting this case, since it could only succeed after a
    4150             :          * no-op update, which is hardly a case worth optimizing for.
    4151             :          */
    4152     1278964 :         if (attrnum == 0)
    4153             :         {
    4154           0 :             modified = bms_add_member(modified, attidx);
    4155     1220162 :             continue;
    4156             :         }
    4157             : 
    4158             :         /*
    4159             :          * Likewise, automatically say "not equal" for any system attribute
    4160             :          * other than tableOID; we cannot expect these to be consistent in a
    4161             :          * HOT chain, or even to be set correctly yet in the new tuple.
    4162             :          */
    4163     1278964 :         if (attrnum < 0)
    4164             :         {
    4165           0 :             if (attrnum != TableOidAttributeNumber)
    4166             :             {
    4167           0 :                 modified = bms_add_member(modified, attidx);
    4168           0 :                 continue;
    4169             :             }
    4170             :         }
    4171             : 
    4172             :         /*
    4173             :          * Extract the corresponding values.  XXX this is pretty inefficient
    4174             :          * if there are many indexed columns.  Should we do a single
    4175             :          * heap_deform_tuple call on each tuple, instead?   But that doesn't
    4176             :          * work for system columns ...
    4177             :          */
    4178     1278964 :         value1 = heap_getattr(oldtup, attrnum, tupdesc, &isnull1);
    4179     1278964 :         value2 = heap_getattr(newtup, attrnum, tupdesc, &isnull2);
    4180             : 
    4181     1278964 :         if (!heap_attr_equals(tupdesc, attrnum, value1,
    4182             :                               value2, isnull1, isnull2))
    4183             :         {
    4184       50494 :             modified = bms_add_member(modified, attidx);
    4185       50494 :             continue;
    4186             :         }
    4187             : 
    4188             :         /*
    4189             :          * No need to check attributes that can't be stored externally. Note
    4190             :          * that system attributes can't be stored externally.
    4191             :          */
    4192     1228470 :         if (attrnum < 0 || isnull1 ||
    4193     1218488 :             TupleDescAttr(tupdesc, attrnum - 1)->attlen != -1)
    4194     1169668 :             continue;
    4195             : 
    4196             :         /*
    4197             :          * Check if the old tuple's attribute is stored externally and is a
    4198             :          * member of external_cols.
    4199             :          */
    4200       58812 :         if (VARATT_IS_EXTERNAL((struct varlena *) DatumGetPointer(value1)) &&
    4201          10 :             bms_is_member(attidx, external_cols))
    4202           4 :             *has_external = true;
    4203             :     }
    4204             : 
    4205      566910 :     return modified;
    4206             : }
    4207             : 
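/*
 * [Editorial note -- illustrative, not part of heapam.c]
 * The bitmapsets handled here follow the usual convention for attribute
 * sets that may include system columns: bit index = attrnum -
 * FirstLowInvalidHeapAttributeNumber, matching what
 * RelationGetIndexAttrBitmap() produces.  Assuming
 * FirstLowInvalidHeapAttributeNumber is -7 (its current value), user
 * column 3 corresponds to bit 10, and the loop above converts back with
 * "attrnum = attidx + FirstLowInvalidHeapAttributeNumber".
 */
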
    4208             : /*
    4209             :  *  simple_heap_update - replace a tuple
    4210             :  *
    4211             :  * This routine may be used to update a tuple when concurrent updates of
    4212             :  * the target tuple are not expected (for example, because we have a lock
    4213             :  * on the relation associated with the tuple).  Any failure is reported
    4214             :  * via ereport().
    4215             :  */
    4216             : void
    4217      190354 : simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup,
    4218             :                    TU_UpdateIndexes *update_indexes)
    4219             : {
    4220             :     TM_Result   result;
    4221             :     TM_FailureData tmfd;
    4222             :     LockTupleMode lockmode;
    4223             : 
    4224      190354 :     result = heap_update(relation, otid, tup,
    4225             :                          GetCurrentCommandId(true), InvalidSnapshot,
    4226             :                          true /* wait for commit */ ,
    4227             :                          &tmfd, &lockmode, update_indexes);
    4228      190354 :     switch (result)
    4229             :     {
    4230           0 :         case TM_SelfModified:
    4231             :             /* Tuple was already updated in current command? */
    4232           0 :             elog(ERROR, "tuple already updated by self");
    4233             :             break;
    4234             : 
    4235      190354 :         case TM_Ok:
    4236             :             /* done successfully */
    4237      190354 :             break;
    4238             : 
    4239           0 :         case TM_Updated:
    4240           0 :             elog(ERROR, "tuple concurrently updated");
    4241             :             break;
    4242             : 
    4243           0 :         case TM_Deleted:
    4244           0 :             elog(ERROR, "tuple concurrently deleted");
    4245             :             break;
    4246             : 
    4247           0 :         default:
    4248           0 :             elog(ERROR, "unrecognized heap_update status: %u", result);
    4249             :             break;
    4250             :     }
    4251      190354 : }
    4252             : 
    4253             : 
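/*
 * [Editorial note -- illustrative sketch, not part of heapam.c]
 * A typical simple_heap_update() call site, with hypothetical variable
 * names, looks something like:
 *
 *     HeapTuple        newtup;
 *     TU_UpdateIndexes update_indexes;
 *
 *     newtup = heap_modify_tuple(oldtup, RelationGetDescr(rel),
 *                                values, nulls, replaces);
 *     simple_heap_update(rel, &oldtup->t_self, newtup, &update_indexes);
 *     ... then maintain indexes as dictated by update_indexes,
 *         skipping them entirely when it is TU_None ...
 *
 * Any concurrency failure is raised as an error inside the function, so
 * the caller does not need to check a return value.
 */
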
    4254             : /*
    4255             :  * Return the MultiXactStatus corresponding to the given tuple lock mode.
    4256             :  */
    4257             : static MultiXactStatus
    4258        2406 : get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
    4259             : {
    4260             :     int         retval;
    4261             : 
    4262        2406 :     if (is_update)
    4263         192 :         retval = tupleLockExtraInfo[mode].updstatus;
    4264             :     else
    4265        2214 :         retval = tupleLockExtraInfo[mode].lockstatus;
    4266             : 
    4267        2406 :     if (retval == -1)
    4268           0 :         elog(ERROR, "invalid lock tuple mode %d/%s", mode,
    4269             :              is_update ? "true" : "false");
    4270             : 
    4271        2406 :     return (MultiXactStatus) retval;
    4272             : }
    4273             : 
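/*
 * [Editorial note -- for orientation, not part of heapam.c]
 * tupleLockExtraInfo is defined near the top of this file.  As a rough
 * guide (consult the table itself for the authoritative mapping), the
 * lock modes translate along these lines:
 *
 *     LockTupleKeyShare       -> MultiXactStatusForKeyShare    (lock only)
 *     LockTupleShare          -> MultiXactStatusForShare       (lock only)
 *     LockTupleNoKeyExclusive -> MultiXactStatusForNoKeyUpdate / NoKeyUpdate
 *     LockTupleExclusive      -> MultiXactStatusForUpdate      / Update
 *
 * where the value after the slash is the updstatus used when is_update
 * is true; the two share-level modes have no update variant, which is
 * what the retval == -1 check above guards against.
 */
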
    4274             : /*
    4275             :  *  heap_lock_tuple - lock a tuple in shared or exclusive mode
    4276             :  *
    4277             :  * Note that this acquires a buffer pin, which the caller must release.
    4278             :  *
    4279             :  * Input parameters:
    4280             :  *  relation: relation containing tuple (caller must hold suitable lock)
    4281             :  *  tid: TID of tuple to lock
    4282             :  *  cid: current command ID (used for visibility test, and stored into
    4283             :  *      tuple's cmax if lock is successful)
    4284             :  *  mode: indicates if shared or exclusive tuple lock is desired
    4285             :  *  wait_policy: what to do if tuple lock is not available
    4286             :  *  follow_updates: if true, follow the update chain to also lock descendant
    4287             :  *      tuples.
    4288             :  *
    4289             :  * Output parameters:
    4290             :  *  *tuple: all fields filled in
    4291             :  *  *buffer: set to buffer holding tuple (pinned but not locked at exit)
    4292             :  *  *tmfd: filled in failure cases (see below)
    4293             :  *
    4294             :  * Function results are the same as the ones for table_tuple_lock().
    4295             :  *
    4296             :  * In the failure cases other than TM_Invisible, the routine fills
    4297             :  * *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
    4298             :  * if necessary), and t_cmax (the last only for TM_SelfModified,
    4299             :  * since we cannot obtain cmax from a combo CID generated by another
    4300             :  * transaction).
    4301             :  * See comments for struct TM_FailureData for additional info.
    4302             :  *
    4303             :  * See README.tuplock for a thorough explanation of this mechanism.
    4304             :  */
    4305             : TM_Result
    4306      165362 : heap_lock_tuple(Relation relation, HeapTuple tuple,
    4307             :                 CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
    4308             :                 bool follow_updates,
    4309             :                 Buffer *buffer, TM_FailureData *tmfd)
    4310             : {
    4311             :     TM_Result   result;
    4312      165362 :     ItemPointer tid = &(tuple->t_self);
    4313             :     ItemId      lp;
    4314             :     Page        page;
    4315      165362 :     Buffer      vmbuffer = InvalidBuffer;
    4316             :     BlockNumber block;
    4317             :     TransactionId xid,
    4318             :                 xmax;
    4319             :     uint16      old_infomask,
    4320             :                 new_infomask,
    4321             :                 new_infomask2;
    4322      165362 :     bool        first_time = true;
    4323      165362 :     bool        skip_tuple_lock = false;
    4324      165362 :     bool        have_tuple_lock = false;
    4325      165362 :     bool        cleared_all_frozen = false;
    4326             : 
    4327      165362 :     *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
    4328      165362 :     block = ItemPointerGetBlockNumber(tid);
    4329             : 
    4330             :     /*
    4331             :      * Before locking the buffer, pin the visibility map page if it appears to
    4332             :      * be necessary.  Since we haven't got the lock yet, someone else might be
    4333             :      * in the middle of changing this, so we'll need to recheck after we have
    4334             :      * the lock.
    4335             :      */
    4336      165362 :     if (PageIsAllVisible(BufferGetPage(*buffer)))
    4337        3318 :         visibilitymap_pin(relation, block, &vmbuffer);
    4338             : 
    4339      165362 :     LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4340             : 
    4341      165362 :     page = BufferGetPage(*buffer);
    4342      165362 :     lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
    4343             :     Assert(ItemIdIsNormal(lp));
    4344             : 
    4345      165362 :     tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
    4346      165362 :     tuple->t_len = ItemIdGetLength(lp);
    4347      165362 :     tuple->t_tableOid = RelationGetRelid(relation);
    4348             : 
    4349      165390 : l3:
    4350      165390 :     result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
    4351             : 
    4352      165390 :     if (result == TM_Invisible)
    4353             :     {
    4354             :         /*
    4355             :          * This is possible, but only when locking a tuple for ON CONFLICT
    4356             :          * UPDATE.  We return this value here rather than throwing an error in
    4357             :          * order to give that case the opportunity to throw a more specific
    4358             :          * error.
    4359             :          */
    4360          24 :         result = TM_Invisible;
    4361          24 :         goto out_locked;
    4362             :     }
    4363      165366 :     else if (result == TM_BeingModified ||
    4364      151748 :              result == TM_Updated ||
    4365             :              result == TM_Deleted)
    4366             :     {
    4367             :         TransactionId xwait;
    4368             :         uint16      infomask;
    4369             :         uint16      infomask2;
    4370             :         bool        require_sleep;
    4371             :         ItemPointerData t_ctid;
    4372             : 
    4373             :         /* must copy state data before unlocking buffer */
    4374       13620 :         xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
    4375       13620 :         infomask = tuple->t_data->t_infomask;
    4376       13620 :         infomask2 = tuple->t_data->t_infomask2;
    4377       13620 :         ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
    4378             : 
    4379       13620 :         LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
    4380             : 
    4381             :         /*
    4382             :          * If any subtransaction of the current top transaction already holds
    4383             :          * a lock as strong as or stronger than what we're requesting, we
    4384             :          * effectively hold the desired lock already.  We *must* succeed
    4385             :          * without trying to take the tuple lock, else we will deadlock
    4386             :          * against anyone wanting to acquire a stronger lock.
    4387             :          *
    4388             :          * Note we only do this the first time we loop on the HTSU result;
    4389             :          * there is no point in testing in subsequent passes, because
    4390             :          * evidently our own transaction cannot have acquired a new lock after
    4391             :          * the first time we checked.
    4392             :          */
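        /*
         * [Editorial note -- illustrative, not part of heapam.c]
         * For example, if an earlier savepoint in this transaction already
         * ran SELECT ... FOR UPDATE on this row, a later FOR SHARE request
         * from the same transaction is satisfied right here, without ever
         * touching the heavyweight tuple lock.
         */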
    4393       13620 :         if (first_time)
    4394             :         {
    4395       13602 :             first_time = false;
    4396             : 
    4397       13602 :             if (infomask & HEAP_XMAX_IS_MULTI)
    4398             :             {
    4399             :                 int         i;
    4400             :                 int         nmembers;
    4401             :                 MultiXactMember *members;
    4402             : 
    4403             :                 /*
    4404             :                  * We don't need to allow old multixacts here; if that had
    4405             :                  * been the case, HeapTupleSatisfiesUpdate would have returned
    4406             :                  * MayBeUpdated and we wouldn't be here.
    4407             :                  */
    4408             :                 nmembers =
    4409         174 :                     GetMultiXactIdMembers(xwait, &members, false,
    4410         174 :                                           HEAP_XMAX_IS_LOCKED_ONLY(infomask));
    4411             : 
    4412         520 :                 for (i = 0; i < nmembers; i++)
    4413             :                 {
    4414             :                     /* only consider members of our own transaction */
    4415         374 :                     if (!TransactionIdIsCurrentTransactionId(members[i].xid))
    4416         276 :                         continue;
    4417             : 
    4418          98 :                     if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
    4419             :                     {
    4420          28 :                         pfree(members);
    4421          28 :                         result = TM_Ok;
    4422          28 :                         goto out_unlocked;
    4423             :                     }
    4424             :                     else
    4425             :                     {
    4426             :                         /*
    4427             :                          * Disable acquisition of the heavyweight tuple lock.
    4428             :                          * Otherwise, when promoting a weaker lock, we might
    4429             :                          * deadlock with another locker that has acquired the
    4430             :                          * heavyweight tuple lock and is waiting for our
    4431             :                          * transaction to finish.
    4432             :                          *
    4433             :                          * Note that in this case we still need to wait for
    4434             :                          * the multixact if required, to avoid acquiring
    4435             :                          * conflicting locks.
    4436             :                          */
    4437          70 :                         skip_tuple_lock = true;
    4438             :                     }
    4439             :                 }
    4440             : 
    4441         146 :                 if (members)
    4442         146 :                     pfree(members);
    4443             :             }
    4444       13428 :             else if (TransactionIdIsCurrentTransactionId(xwait))
    4445             :             {
    4446       10986 :                 switch (mode)
    4447             :                 {
    4448         272 :                     case LockTupleKeyShare:
    4449             :                         Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
    4450             :                                HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
    4451             :                                HEAP_XMAX_IS_EXCL_LOCKED(infomask));
    4452         272 :                         result = TM_Ok;
    4453         272 :                         goto out_unlocked;
    4454          12 :                     case LockTupleShare:
    4455          12 :                         if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
    4456          12 :                             HEAP_XMAX_IS_EXCL_LOCKED(infomask))
    4457             :                         {
    4458           0 :                             result = TM_Ok;
    4459           0 :                             goto out_unlocked;
    4460             :                         }
    4461          12 :                         break;
    4462         122 :                     case LockTupleNoKeyExclusive:
    4463         122 :                         if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
    4464             :                         {
    4465         100 :                             result = TM_Ok;
    4466         100 :                             goto out_unlocked;
    4467             :                         }
    4468          22 :                         break;
    4469       10580 :                     case LockTupleExclusive:
    4470       10580 :                         if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
    4471         502 :                             infomask2 & HEAP_KEYS_UPDATED)
    4472             :                         {
    4473         460 :                             result = TM_Ok;
    4474         460 :                             goto out_unlocked;
    4475             :                         }
    4476       10120 :                         break;
    4477             :                 }
    4478        2460 :             }
    4479             :         }
    4480             : 
    4481             :         /*
    4482             :          * Initially assume that we will have to wait for the locking
    4483             :          * transaction(s) to finish.  We check various cases below in which
    4484             :          * this can be turned off.
    4485             :          */
    4486       12760 :         require_sleep = true;
    4487       12760 :         if (mode == LockTupleKeyShare)
    4488             :         {
    4489             :             /*
    4490             :              * If we're requesting KeyShare, and there's no update present, we
    4491             :              * don't need to wait.  Even if there is an update, we can still
    4492             :              * continue if the key hasn't been modified.
    4493             :              *
    4494             :              * However, if there are updates, we need to walk the update chain
    4495             :              * to mark future versions of the row as locked, too.  That way,
    4496             :              * if somebody deletes that future version, we're protected
    4497             :              * against the key going away.  This locking of future versions
    4498             :              * could block momentarily, if a concurrent transaction is
    4499             :              * deleting a key; or it could return a value to the effect that
    4500             :              * the transaction deleting the key has already committed.  So we
    4501             :              * do this before re-locking the buffer; otherwise this would be
    4502             :              * prone to deadlocks.
    4503             :              *
    4504             :              * Note that the TID we're locking was grabbed before we unlocked
    4505             :              * the buffer.  For it to change while we're not looking, the
    4506             :              * other properties we're testing for below after re-locking the
    4507             :              * buffer would also change, in which case we would restart this
    4508             :              * loop above.
    4509             :              */
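            /*
             * [Editorial note -- illustrative, not part of heapam.c]
             * For example, a concurrent UPDATE that modified no key columns
             * (a "no-key update") leaves HEAP_KEYS_UPDATED unset, so a
             * SELECT ... FOR KEY SHARE arriving here does not sleep; it only
             * follows the update chain so that newer row versions get locked
             * as well.
             */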
    4510        1170 :             if (!(infomask2 & HEAP_KEYS_UPDATED))
    4511             :             {
    4512             :                 bool        updated;
    4513             : 
    4514        1108 :                 updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
    4515             : 
    4516             :                 /*
    4517             :                  * If there are updates, follow the update chain; bail out if
    4518             :                  * that cannot be done.
    4519             :                  */
    4520        1108 :                 if (follow_updates && updated)
    4521             :                 {
    4522             :                     TM_Result   res;
    4523             : 
    4524         100 :                     res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
    4525             :                                                   GetCurrentTransactionId(),
    4526             :                                                   mode);
    4527         100 :                     if (res != TM_Ok)
    4528             :                     {
    4529          12 :                         result = res;
    4530             :                         /* recovery code expects to have buffer lock held */
    4531          12 :                         LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4532         358 :                         goto failed;
    4533             :                     }
    4534             :                 }
    4535             : 
    4536        1096 :                 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4537             : 
    4538             :                 /*
    4539             :                  * Make sure it's still an appropriate lock, else start over.
    4540             :                  * Also, if it wasn't updated before we released the lock, but
    4541             :                  * is updated now, we start over too; the reason is that we
    4542             :                  * now need to follow the update chain to lock the new
    4543             :                  * versions.
    4544             :                  */
    4545        1096 :                 if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
    4546          86 :                     ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
    4547          86 :                      !updated))
    4548          28 :                     goto l3;
    4549             : 
    4550             :                 /* Things look okay, so we can skip sleeping */
    4551        1096 :                 require_sleep = false;
    4552             : 
    4553             :                 /*
    4554             :                  * Note we allow Xmax to change here; other updaters/lockers
    4555             :                  * could have modified it before we grabbed the buffer lock.
    4556             :                  * However, this is not a problem, because with the recheck we
    4557             :                  * just did we ensure that they still don't conflict with the
    4558             :                  * lock we want.
    4559             :                  */
    4560             :             }
    4561             :         }
    4562       11590 :         else if (mode == LockTupleShare)
    4563             :         {
    4564             :             /*
    4565             :              * If we're requesting Share, we can similarly avoid sleeping if
    4566             :              * there's no update and no exclusive lock present.
    4567             :              */
    4568         882 :             if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
    4569         882 :                 !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
    4570             :             {
    4571         870 :                 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4572             : 
    4573             :                 /*
    4574             :                  * Make sure it's still an appropriate lock, else start over.
    4575             :                  * See above about allowing xmax to change.
    4576             :                  */
    4577         870 :                 if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
    4578         870 :                     HEAP_XMAX_IS_EXCL_LOCKED(tuple->t_data->t_infomask))
    4579           0 :                     goto l3;
    4580         870 :                 require_sleep = false;
    4581             :             }
    4582             :         }
    4583       10708 :         else if (mode == LockTupleNoKeyExclusive)
    4584             :         {
    4585             :             /*
    4586             :              * If we're requesting NoKeyExclusive, we might also be able to
     4587             :              * avoid sleeping; just ensure that there is no conflicting lock
    4588             :              * already acquired.
    4589             :              */
    4590         304 :             if (infomask & HEAP_XMAX_IS_MULTI)
    4591             :             {
    4592          52 :                 if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
    4593             :                                              mode, NULL))
    4594             :                 {
    4595             :                     /*
    4596             :                      * No conflict, but if the xmax changed under us in the
    4597             :                      * meantime, start over.
    4598             :                      */
    4599          26 :                     LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4600          26 :                     if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
    4601          26 :                         !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
    4602             :                                              xwait))
    4603           0 :                         goto l3;
    4604             : 
    4605             :                     /* otherwise, we're good */
    4606          26 :                     require_sleep = false;
    4607             :                 }
    4608             :             }
    4609         252 :             else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
    4610             :             {
    4611          30 :                 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4612             : 
    4613             :                 /* if the xmax changed in the meantime, start over */
    4614          30 :                 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
    4615          30 :                     !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
    4616             :                                          xwait))
    4617           0 :                     goto l3;
    4618             :                 /* otherwise, we're good */
    4619          30 :                 require_sleep = false;
    4620             :             }
    4621             :         }
    4622             : 
    4623             :         /*
    4624             :          * As a check independent from those above, we can also avoid sleeping
    4625             :          * if the current transaction is the sole locker of the tuple.  Note
    4626             :          * that the strength of the lock already held is irrelevant; this is
    4627             :          * not about recording the lock in Xmax (which will be done regardless
    4628             :          * of this optimization, below).  Also, note that the cases where we
    4629             :          * hold a lock stronger than we are requesting are already handled
    4630             :          * above by not doing anything.
    4631             :          *
    4632             :          * Note we only deal with the non-multixact case here; MultiXactIdWait
    4633             :          * is well equipped to deal with this situation on its own.
    4634             :          */
    4635       23392 :         if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
    4636       10644 :             TransactionIdIsCurrentTransactionId(xwait))
    4637             :         {
    4638             :             /* ... but if the xmax changed in the meantime, start over */
    4639       10120 :             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4640       10120 :             if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
    4641       10120 :                 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
    4642             :                                      xwait))
    4643           0 :                 goto l3;
    4644             :             Assert(HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask));
    4645       10120 :             require_sleep = false;
    4646             :         }
    4647             : 
    4648             :         /*
    4649             :          * Time to sleep on the other transaction/multixact, if necessary.
    4650             :          *
    4651             :          * If the other transaction is an update/delete that's already
    4652             :          * committed, then sleeping cannot possibly do any good: if we're
    4653             :          * required to sleep, get out to raise an error instead.
    4654             :          *
    4655             :          * By here, we either have already acquired the buffer exclusive lock,
    4656             :          * or we must wait for the locking transaction or multixact; so below
    4657             :          * we ensure that we grab buffer lock after the sleep.
    4658             :          */
    4659       12748 :         if (require_sleep && (result == TM_Updated || result == TM_Deleted))
    4660             :         {
    4661         270 :             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4662         270 :             goto failed;
    4663             :         }
    4664       12478 :         else if (require_sleep)
    4665             :         {
    4666             :             /*
    4667             :              * Acquire tuple lock to establish our priority for the tuple, or
    4668             :              * die trying.  LockTuple will release us when we are next-in-line
    4669             :              * for the tuple.  We must do this even if we are share-locking,
    4670             :              * but not if we already have a weaker lock on the tuple.
    4671             :              *
    4672             :              * If we are forced to "start over" below, we keep the tuple lock;
    4673             :              * this arranges that we stay at the head of the line while
    4674             :              * rechecking tuple state.
    4675             :              */
    4676         336 :             if (!skip_tuple_lock &&
    4677         304 :                 !heap_acquire_tuplock(relation, tid, mode, wait_policy,
    4678             :                                       &have_tuple_lock))
    4679             :             {
    4680             :                 /*
    4681             :                  * This can only happen if wait_policy is Skip and the lock
    4682             :                  * couldn't be obtained.
    4683             :                  */
    4684           2 :                 result = TM_WouldBlock;
    4685             :                 /* recovery code expects to have buffer lock held */
    4686           2 :                 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4687           2 :                 goto failed;
    4688             :             }
    4689             : 
    4690         332 :             if (infomask & HEAP_XMAX_IS_MULTI)
    4691             :             {
    4692          80 :                 MultiXactStatus status = get_mxact_status_for_lock(mode, false);
    4693             : 
    4694             :                 /* We only ever lock tuples, never update them */
    4695          80 :                 if (status >= MultiXactStatusNoKeyUpdate)
    4696           0 :                     elog(ERROR, "invalid lock mode in heap_lock_tuple");
    4697             : 
    4698             :                 /* wait for multixact to end, or die trying  */
    4699          80 :                 switch (wait_policy)
    4700             :                 {
    4701          72 :                     case LockWaitBlock:
    4702          72 :                         MultiXactIdWait((MultiXactId) xwait, status, infomask,
    4703             :                                         relation, &tuple->t_self, XLTW_Lock, NULL);
    4704          72 :                         break;
    4705           4 :                     case LockWaitSkip:
    4706           4 :                         if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
    4707             :                                                         status, infomask, relation,
    4708             :                                                         NULL))
    4709             :                         {
    4710           4 :                             result = TM_WouldBlock;
    4711             :                             /* recovery code expects to have buffer lock held */
    4712           4 :                             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4713           4 :                             goto failed;
    4714             :                         }
    4715           0 :                         break;
    4716           4 :                     case LockWaitError:
    4717           4 :                         if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
    4718             :                                                         status, infomask, relation,
    4719             :                                                         NULL))
    4720           4 :                             ereport(ERROR,
    4721             :                                     (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
    4722             :                                      errmsg("could not obtain lock on row in relation \"%s\"",
    4723             :                                             RelationGetRelationName(relation))));
    4724             : 
    4725           0 :                         break;
    4726             :                 }
    4727             : 
    4728             :                 /*
    4729             :                  * Of course, the multixact might not be done here: if we're
    4730             :                  * requesting a light lock mode, other transactions with light
    4731             :                  * locks could still be alive, as well as locks owned by our
    4732             :                  * own xact or other subxacts of this backend.  We need to
    4733             :                  * preserve the surviving MultiXact members.  Note that it
    4734             :                  * isn't absolutely necessary in the latter case, but doing so
    4735             :                  * is simpler.
    4736             :                  */
    4737          72 :             }
    4738             :             else
    4739             :             {
    4740             :                 /* wait for regular transaction to end, or die trying */
    4741         252 :                 switch (wait_policy)
    4742             :                 {
    4743         174 :                     case LockWaitBlock:
    4744         174 :                         XactLockTableWait(xwait, relation, &tuple->t_self,
    4745             :                                           XLTW_Lock);
    4746         174 :                         break;
    4747          66 :                     case LockWaitSkip:
    4748          66 :                         if (!ConditionalXactLockTableWait(xwait))
    4749             :                         {
    4750          66 :                             result = TM_WouldBlock;
    4751             :                             /* recovery code expects to have buffer lock held */
    4752          66 :                             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4753          66 :                             goto failed;
    4754             :                         }
    4755           0 :                         break;
    4756          12 :                     case LockWaitError:
    4757          12 :                         if (!ConditionalXactLockTableWait(xwait))
    4758          12 :                             ereport(ERROR,
    4759             :                                     (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
    4760             :                                      errmsg("could not obtain lock on row in relation \"%s\"",
    4761             :                                             RelationGetRelationName(relation))));
    4762           0 :                         break;
    4763             :                 }
    4764         246 :             }
    4765             : 
    4766             :             /* if there are updates, follow the update chain */
    4767         246 :             if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
    4768             :             {
    4769             :                 TM_Result   res;
    4770             : 
    4771          76 :                 res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
    4772             :                                               GetCurrentTransactionId(),
    4773             :                                               mode);
    4774          76 :                 if (res != TM_Ok)
    4775             :                 {
    4776           4 :                     result = res;
    4777             :                     /* recovery code expects to have buffer lock held */
    4778           4 :                     LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4779           4 :                     goto failed;
    4780             :                 }
    4781             :             }
    4782             : 
    4783         242 :             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4784             : 
    4785             :             /*
    4786             :              * xwait is done, but if xwait had just locked the tuple then some
    4787             :              * other xact could update this tuple before we get to this point.
    4788             :              * Check for xmax change, and start over if so.
    4789             :              */
    4790         242 :             if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
    4791         218 :                 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
    4792             :                                      xwait))
    4793          28 :                 goto l3;
    4794             : 
    4795         214 :             if (!(infomask & HEAP_XMAX_IS_MULTI))
    4796             :             {
    4797             :                 /*
    4798             :                  * Otherwise check if it committed or aborted.  Note we cannot
    4799             :                  * be here if the tuple was only locked by somebody who didn't
    4800             :                  * conflict with us; that would have been handled above.  So
    4801             :                  * that transaction must necessarily be gone by now.  But
    4802             :                  * don't check for this in the multixact case, because some
    4803             :                  * locker transactions might still be running.
    4804             :                  */
    4805         152 :                 UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
    4806             :             }
    4807             :         }
    4808             : 
    4809             :         /* By here, we're certain that we hold buffer exclusive lock again */
    4810             : 
    4811             :         /*
    4812             :          * We may lock if previous xmax aborted, or if it committed but only
    4813             :          * locked the tuple without updating it; or if we didn't have to wait
    4814             :          * at all for whatever reason.
    4815             :          */
    4816       12356 :         if (!require_sleep ||
    4817         214 :             (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
    4818         278 :             HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
    4819         124 :             HeapTupleHeaderIsOnlyLocked(tuple->t_data))
    4820       12244 :             result = TM_Ok;
    4821         112 :         else if (!ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
    4822          90 :             result = TM_Updated;
    4823             :         else
    4824          22 :             result = TM_Deleted;
    4825             :     }
    4826             : 
    4827      151746 : failed:
    4828      164460 :     if (result != TM_Ok)
    4829             :     {
    4830             :         Assert(result == TM_SelfModified || result == TM_Updated ||
    4831             :                result == TM_Deleted || result == TM_WouldBlock);
    4832             : 
    4833             :         /*
    4834             :          * When locking a tuple under LockWaitSkip semantics and we fail with
    4835             :          * TM_WouldBlock above, it's possible for concurrent transactions to
    4836             :          * release the lock and set HEAP_XMAX_INVALID in the meantime.  So
    4837             :          * this assert is slightly different from the equivalent one in
    4838             :          * heap_delete and heap_update.
    4839             :          */
    4840             :         Assert((result == TM_WouldBlock) ||
    4841             :                !(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
    4842             :         Assert(result != TM_Updated ||
    4843             :                !ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid));
    4844         482 :         tmfd->ctid = tuple->t_data->t_ctid;
    4845         482 :         tmfd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
    4846         482 :         if (result == TM_SelfModified)
    4847          12 :             tmfd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
    4848             :         else
    4849         470 :             tmfd->cmax = InvalidCommandId;
    4850         482 :         goto out_locked;
    4851             :     }
    4852             : 
    4853             :     /*
    4854             :      * If we didn't pin the visibility map page and the page has become all
    4855             :      * visible while we were busy locking the buffer, or during some
    4856             :      * subsequent window during which we had it unlocked, we'll have to unlock
    4857             :      * and re-lock, to avoid holding the buffer lock across I/O.  That's a bit
    4858             :      * unfortunate, especially since we'll now have to recheck whether the
    4859             :      * tuple has been locked or updated under us, but hopefully it won't
    4860             :      * happen very often.
    4861             :      */
    4862      163978 :     if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
    4863             :     {
    4864           0 :         LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
    4865           0 :         visibilitymap_pin(relation, block, &vmbuffer);
    4866           0 :         LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4867           0 :         goto l3;
    4868             :     }
    4869             : 
    4870      163978 :     xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
    4871      163978 :     old_infomask = tuple->t_data->t_infomask;
    4872             : 
    4873             :     /*
    4874             :      * If this is the first possibly-multixact-able operation in the current
    4875             :      * transaction, set my per-backend OldestMemberMXactId setting. We can be
    4876             :      * certain that the transaction will never become a member of any older
    4877             :      * MultiXactIds than that.  (We have to do this even if we end up just
    4878             :      * using our own TransactionId below, since some other backend could
    4879             :      * incorporate our XID into a MultiXact immediately afterwards.)
    4880             :      */
    4881      163978 :     MultiXactIdSetOldestMember();
    4882             : 
    4883             :     /*
    4884             :      * Compute the new xmax and infomask to store into the tuple.  Note we do
    4885             :      * not modify the tuple just yet, because that would leave it in the wrong
    4886             :      * state if multixact.c elogs.
    4887             :      */
    4888      163978 :     compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
    4889             :                               GetCurrentTransactionId(), mode, false,
    4890             :                               &xid, &new_infomask, &new_infomask2);
    4891             : 
    4892      163978 :     START_CRIT_SECTION();
    4893             : 
    4894             :     /*
    4895             :      * Store transaction information of xact locking the tuple.
    4896             :      *
    4897             :      * Note: Cmax is meaningless in this context, so don't set it; this avoids
    4898             :      * possibly generating a useless combo CID.  Moreover, if we're locking a
    4899             :      * previously updated tuple, it's important to preserve the Cmax.
    4900             :      *
    4901             :      * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
    4902             :      * we would break the HOT chain.
    4903             :      */
    4904      163978 :     tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
    4905      163978 :     tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    4906      163978 :     tuple->t_data->t_infomask |= new_infomask;
    4907      163978 :     tuple->t_data->t_infomask2 |= new_infomask2;
    4908      163978 :     if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
    4909      163900 :         HeapTupleHeaderClearHotUpdated(tuple->t_data);
    4910      163978 :     HeapTupleHeaderSetXmax(tuple->t_data, xid);
    4911             : 
    4912             :     /*
    4913             :      * Make sure there is no forward chain link in t_ctid.  Note that in the
    4914             :      * cases where the tuple has been updated, we must not overwrite t_ctid,
    4915             :      * because it was set by the updater.  Moreover, if the tuple has been
    4916             :      * updated, we need to follow the update chain to lock the new versions of
    4917             :      * the tuple as well.
    4918             :      */
    4919      163978 :     if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
    4920      163900 :         tuple->t_data->t_ctid = *tid;
    4921             : 
    4922             :     /* Clear only the all-frozen bit on visibility map if needed */
    4923      167296 :     if (PageIsAllVisible(page) &&
    4924        3318 :         visibilitymap_clear(relation, block, vmbuffer,
    4925             :                             VISIBILITYMAP_ALL_FROZEN))
    4926          30 :         cleared_all_frozen = true;
    4927             : 
    4928             : 
    4929      163978 :     MarkBufferDirty(*buffer);
    4930             : 
    4931             :     /*
    4932             :      * XLOG stuff.  You might think that we don't need an XLOG record because
    4933             :      * there is no state change worth restoring after a crash.  You would be
    4934             :      * wrong however: we have just written either a TransactionId or a
    4935             :      * MultiXactId that may never have been seen on disk before, and we need
    4936             :      * to make sure that there are XLOG entries covering those ID numbers.
    4937             :      * Else the same IDs might be re-used after a crash, which would be
    4938             :      * disastrous if this page made it to disk before the crash.  Essentially
    4939             :      * we have to enforce the WAL log-before-data rule even in this case.
    4940             :      * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
    4941             :      * entries for everything anyway.)
    4942             :      */
    4943      163978 :     if (RelationNeedsWAL(relation))
    4944             :     {
    4945             :         xl_heap_lock xlrec;
    4946             :         XLogRecPtr  recptr;
    4947             : 
    4948      162856 :         XLogBeginInsert();
    4949      162856 :         XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD);
    4950             : 
    4951      162856 :         xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
    4952      162856 :         xlrec.xmax = xid;
    4953      325712 :         xlrec.infobits_set = compute_infobits(new_infomask,
    4954      162856 :                                               tuple->t_data->t_infomask2);
    4955      162856 :         xlrec.flags = cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
    4956      162856 :         XLogRegisterData((char *) &xlrec, SizeOfHeapLock);
    4957             : 
    4958             :         /* we don't decode row locks atm, so no need to log the origin */
    4959             : 
    4960      162856 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
    4961             : 
    4962      162856 :         PageSetLSN(page, recptr);
    4963             :     }
    4964             : 
    4965      163978 :     END_CRIT_SECTION();
    4966             : 
    4967      163978 :     result = TM_Ok;
    4968             : 
    4969      164484 : out_locked:
    4970      164484 :     LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
    4971             : 
    4972      165344 : out_unlocked:
    4973      165344 :     if (BufferIsValid(vmbuffer))
    4974        3318 :         ReleaseBuffer(vmbuffer);
    4975             : 
    4976             :     /*
    4977             :      * Don't update the visibility map here. Locking a tuple doesn't change
    4978             :      * visibility info.
    4979             :      */
    4980             : 
    4981             :     /*
    4982             :      * Now that we have successfully marked the tuple as locked, we can
    4983             :      * release the lmgr tuple lock, if we had it.
    4984             :      */
    4985      165344 :     if (have_tuple_lock)
    4986         274 :         UnlockTupleTuplock(relation, tid, mode);
    4987             : 
    4988      165344 :     return result;
    4989             : }
    4990             : 
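/*
 * Editor's illustration -- not part of heapam.c.  A minimal caller sketch
 * showing how heap_lock_tuple's TM_WouldBlock result maps onto SKIP LOCKED
 * behaviour.  The function name, the use of GetCurrentCommandId(), and the
 * assumption that the caller simply releases the returned buffer are
 * illustrative simplifications; the real table-AM call path is
 * heapam_tuple_lock in heapam_handler.c.
 */
static bool
try_lock_row_skip_locked(Relation rel, ItemPointer tid)
{
    HeapTupleData tuple;
    Buffer      buffer;
    TM_FailureData tmfd;
    TM_Result   result;

    /* heap_lock_tuple fetches the tuple itself; only t_self must be set */
    tuple.t_self = *tid;

    result = heap_lock_tuple(rel, &tuple,
                             GetCurrentCommandId(true),
                             LockTupleExclusive,    /* FOR UPDATE strength */
                             LockWaitSkip,          /* don't wait; skip */
                             false,                 /* follow_updates */
                             &buffer, &tmfd);

    /* the tuple's buffer comes back pinned; release it once done */
    ReleaseBuffer(buffer);

    /* TM_WouldBlock (and other non-OK results) mean "skip this row" */
    return result == TM_Ok;
}
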
    4991             : /*
    4992             :  * Acquire heavyweight lock on the given tuple, in preparation for acquiring
    4993             :  * its normal, Xmax-based tuple lock.
    4994             :  *
    4995             :  * have_tuple_lock is an input and output parameter: on input, it indicates
    4996             :  * whether the lock has previously been acquired (and this function does
    4997             :  * nothing in that case).  If this function returns success, have_tuple_lock
    4998             :  * has been flipped to true.
    4999             :  *
    5000             :  * Returns false if it was unable to obtain the lock; this can only happen if
    5001             :  * wait_policy is Skip.
    5002             :  */
    5003             : static bool
    5004         514 : heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
    5005             :                      LockWaitPolicy wait_policy, bool *have_tuple_lock)
    5006             : {
    5007         514 :     if (*have_tuple_lock)
    5008          18 :         return true;
    5009             : 
    5010         496 :     switch (wait_policy)
    5011             :     {
    5012         414 :         case LockWaitBlock:
    5013         414 :             LockTupleTuplock(relation, tid, mode);
    5014         414 :             break;
    5015             : 
    5016          68 :         case LockWaitSkip:
    5017          68 :             if (!ConditionalLockTupleTuplock(relation, tid, mode))
    5018           2 :                 return false;
    5019          66 :             break;
    5020             : 
    5021          14 :         case LockWaitError:
    5022          14 :             if (!ConditionalLockTupleTuplock(relation, tid, mode))
    5023           2 :                 ereport(ERROR,
    5024             :                         (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
    5025             :                          errmsg("could not obtain lock on row in relation \"%s\"",
    5026             :                                 RelationGetRelationName(relation))));
    5027          12 :             break;
    5028             :     }
    5029         492 :     *have_tuple_lock = true;
    5030             : 
    5031         492 :     return true;
    5032             : }
    5033             : 
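/*
 * Editor's note -- condensed sketch, not verbatim code.  The have_tuple_lock
 * protocol as used by heap_lock_tuple above: the flag starts out false, the
 * function becomes a no-op once the heavyweight lock is held, and the caller
 * releases the tuple lock at the end only if the flag ended up set.
 */
    bool        have_tuple_lock = false;

    /* ... on deciding to sleep waiting for a conflicting locker ... */
    if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
                              &have_tuple_lock))
    {
        /* only reachable with LockWaitSkip: caller reports TM_WouldBlock */
    }

    /* ... once the tuple's xmax has been (re)written ... */
    if (have_tuple_lock)
        UnlockTupleTuplock(relation, tid, mode);
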
    5034             : /*
    5035             :  * Given an original set of Xmax and infomask, and a transaction (identified by
    5036             :  * add_to_xmax) acquiring a new lock of some mode, compute the new Xmax and
    5037             :  * corresponding infomasks to use on the tuple.
    5038             :  *
    5039             :  * Note that this might have side effects such as creating a new MultiXactId.
    5040             :  *
    5041             :  * Most callers will have called HeapTupleSatisfiesUpdate before this function;
    5042             :  * that will have set the HEAP_XMAX_INVALID bit if the xmax was a MultiXactId
    5043             :  * but it was not running anymore.  There is a race condition: the
    5044             :  * MultiXactId may have finished since then, but that uncommon case is handled
    5045             :  * either here or within MultiXactIdExpand.
    5046             :  *
    5047             :  * There is a similar race condition possible when the old xmax was a regular
    5048             :  * TransactionId.  We test TransactionIdIsInProgress again just to narrow the
    5049             :  * window, but it's still possible to end up creating an unnecessary
    5050             :  * MultiXactId.  Fortunately this is harmless.
    5051             :  */
    5052             : static void
    5053     3976380 : compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
    5054             :                           uint16 old_infomask2, TransactionId add_to_xmax,
    5055             :                           LockTupleMode mode, bool is_update,
    5056             :                           TransactionId *result_xmax, uint16 *result_infomask,
    5057             :                           uint16 *result_infomask2)
    5058             : {
    5059             :     TransactionId new_xmax;
    5060             :     uint16      new_infomask,
    5061             :                 new_infomask2;
    5062             : 
    5063             :     Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
    5064             : 
    5065     3976380 : l5:
    5066     3976380 :     new_infomask = 0;
    5067     3976380 :     new_infomask2 = 0;
    5068     3976380 :     if (old_infomask & HEAP_XMAX_INVALID)
    5069             :     {
    5070             :         /*
    5071             :          * No previous locker; we just insert our own TransactionId.
    5072             :          *
    5073             :          * Note that it's critical that this case be the first one checked,
    5074             :          * because there are several blocks below that come back to this one
    5075             :          * to implement certain optimizations; old_infomask might contain
    5076             :          * other dirty bits in those cases, but we don't really care.
    5077             :          */
    5078     3766286 :         if (is_update)
    5079             :         {
    5080     3321116 :             new_xmax = add_to_xmax;
    5081     3321116 :             if (mode == LockTupleExclusive)
    5082     2826260 :                 new_infomask2 |= HEAP_KEYS_UPDATED;
    5083             :         }
    5084             :         else
    5085             :         {
    5086      445170 :             new_infomask |= HEAP_XMAX_LOCK_ONLY;
    5087      445170 :             switch (mode)
    5088             :             {
    5089        4932 :                 case LockTupleKeyShare:
    5090        4932 :                     new_xmax = add_to_xmax;
    5091        4932 :                     new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
    5092        4932 :                     break;
    5093        1416 :                 case LockTupleShare:
    5094        1416 :                     new_xmax = add_to_xmax;
    5095        1416 :                     new_infomask |= HEAP_XMAX_SHR_LOCK;
    5096        1416 :                     break;
    5097      249714 :                 case LockTupleNoKeyExclusive:
    5098      249714 :                     new_xmax = add_to_xmax;
    5099      249714 :                     new_infomask |= HEAP_XMAX_EXCL_LOCK;
    5100      249714 :                     break;
    5101      189108 :                 case LockTupleExclusive:
    5102      189108 :                     new_xmax = add_to_xmax;
    5103      189108 :                     new_infomask |= HEAP_XMAX_EXCL_LOCK;
    5104      189108 :                     new_infomask2 |= HEAP_KEYS_UPDATED;
    5105      189108 :                     break;
    5106           0 :                 default:
    5107           0 :                     new_xmax = InvalidTransactionId;    /* silence compiler */
    5108           0 :                     elog(ERROR, "invalid lock mode");
    5109             :             }
    5110             :         }
    5111             :     }
    5112      210094 :     else if (old_infomask & HEAP_XMAX_IS_MULTI)
    5113             :     {
    5114             :         MultiXactStatus new_status;
    5115             : 
    5116             :         /*
    5117             :          * Currently we don't allow XMAX_COMMITTED to be set for multis, so
    5118             :          * cross-check.
    5119             :          */
    5120             :         Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
    5121             : 
    5122             :         /*
    5123             :          * A multixact together with LOCK_ONLY set but neither lock bit set
    5124             :          * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
    5125             :          * anymore.  This check is critical for databases upgraded by
    5126             :          * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
    5127             :          * that such multis are never passed.
    5128             :          */
    5129         246 :         if (HEAP_LOCKED_UPGRADED(old_infomask))
    5130             :         {
    5131           0 :             old_infomask &= ~HEAP_XMAX_IS_MULTI;
    5132           0 :             old_infomask |= HEAP_XMAX_INVALID;
    5133           0 :             goto l5;
    5134             :         }
    5135             : 
    5136             :         /*
    5137             :          * If the XMAX is already a MultiXactId, then we need to expand it to
    5138             :          * include add_to_xmax; but if all the members were lockers and are
    5139             :          * all gone, we can do away with the IS_MULTI bit and just set
    5140             :          * add_to_xmax as the only locker/updater.  If all lockers are gone
    5141             :          * and we have an updater that aborted, we can also do without a
    5142             :          * multi.
    5143             :          *
    5144             :          * The cost of doing GetMultiXactIdMembers would be paid by
    5145             :          * MultiXactIdExpand if we weren't to do this, so this check is not
    5146             :          * incurring extra work anyhow.
    5147             :          */
    5148         246 :         if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
    5149             :         {
    5150          46 :             if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
    5151          16 :                 !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
    5152             :                                                                 old_infomask)))
    5153             :             {
    5154             :                 /*
    5155             :                  * Reset these bits and restart; otherwise fall through to
    5156             :                  * create a new multi below.
    5157             :                  */
    5158          46 :                 old_infomask &= ~HEAP_XMAX_IS_MULTI;
    5159          46 :                 old_infomask |= HEAP_XMAX_INVALID;
    5160          46 :                 goto l5;
    5161             :             }
    5162             :         }
    5163             : 
    5164         200 :         new_status = get_mxact_status_for_lock(mode, is_update);
    5165             : 
    5166         200 :         new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
    5167             :                                      new_status);
    5168         200 :         GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
    5169             :     }
    5170      209848 :     else if (old_infomask & HEAP_XMAX_COMMITTED)
    5171             :     {
    5172             :         /*
    5173             :          * It's a committed update, so we need to preserve it as the updater
    5174             :          * of the tuple.
    5175             :          */
    5176             :         MultiXactStatus status;
    5177             :         MultiXactStatus new_status;
    5178             : 
    5179          26 :         if (old_infomask2 & HEAP_KEYS_UPDATED)
    5180           0 :             status = MultiXactStatusUpdate;
    5181             :         else
    5182          26 :             status = MultiXactStatusNoKeyUpdate;
    5183             : 
    5184          26 :         new_status = get_mxact_status_for_lock(mode, is_update);
    5185             : 
    5186             :         /*
    5187             :          * Since it's not running, it's obviously impossible for the old
    5188             :          * updater to be identical to the current one, so we need not check
    5189             :          * for that case as we do in the block above.
    5190             :          */
    5191          26 :         new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
    5192          26 :         GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
    5193             :     }
    5194      209822 :     else if (TransactionIdIsInProgress(xmax))
    5195             :     {
    5196             :         /*
    5197             :          * If the XMAX is a valid, in-progress TransactionId, then we need to
    5198             :          * create a new MultiXactId that includes both the old locker or
    5199             :          * updater and our own TransactionId.
    5200             :          */
    5201             :         MultiXactStatus new_status;
    5202             :         MultiXactStatus old_status;
    5203             :         LockTupleMode old_mode;
    5204             : 
    5205      209804 :         if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
    5206             :         {
    5207      209752 :             if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
    5208       11238 :                 old_status = MultiXactStatusForKeyShare;
    5209      198514 :             else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
    5210         862 :                 old_status = MultiXactStatusForShare;
    5211      197652 :             else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
    5212             :             {
    5213      197652 :                 if (old_infomask2 & HEAP_KEYS_UPDATED)
    5214      185430 :                     old_status = MultiXactStatusForUpdate;
    5215             :                 else
    5216       12222 :                     old_status = MultiXactStatusForNoKeyUpdate;
    5217             :             }
    5218             :             else
    5219             :             {
    5220             :                 /*
    5221             :                  * LOCK_ONLY can be present alone only when a page has been
    5222             :                  * upgraded by pg_upgrade.  But in that case,
    5223             :                  * TransactionIdIsInProgress() should have returned false.  We
    5224             :                  * assume it's no longer locked in this case.
    5225             :                  */
    5226           0 :                 elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
    5227           0 :                 old_infomask |= HEAP_XMAX_INVALID;
    5228           0 :                 old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
    5229           0 :                 goto l5;
    5230             :             }
    5231             :         }
    5232             :         else
    5233             :         {
    5234             :             /* it's an update, but which kind? */
    5235          52 :             if (old_infomask2 & HEAP_KEYS_UPDATED)
    5236           0 :                 old_status = MultiXactStatusUpdate;
    5237             :             else
    5238          52 :                 old_status = MultiXactStatusNoKeyUpdate;
    5239             :         }
    5240             : 
    5241      209804 :         old_mode = TUPLOCK_from_mxstatus(old_status);
    5242             : 
    5243             :         /*
    5244             :          * If the lock to be acquired is for the same TransactionId as the
    5245             :          * existing lock, there's an optimization possible: consider only the
    5246             :          * strongest of both locks as the only one present, and restart.
    5247             :          */
    5248      209804 :         if (xmax == add_to_xmax)
    5249             :         {
    5250             :             /*
    5251             :              * Note that it's not possible for the original tuple to be
    5252             :              * updated: we wouldn't be here because the tuple would have been
    5253             :              * invisible and we wouldn't try to update it.  As a subtlety,
    5254             :              * this code can also run when traversing an update chain to lock
    5255             :              * future versions of a tuple.  But we wouldn't be here either,
    5256             :              * because the add_to_xmax would be different from the original
    5257             :              * updater.
    5258             :              */
    5259             :             Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
    5260             : 
    5261             :             /* acquire the strongest of both */
    5262      207770 :             if (mode < old_mode)
    5263      104330 :                 mode = old_mode;
    5264             :             /* mustn't touch is_update */
    5265             : 
    5266      207770 :             old_infomask |= HEAP_XMAX_INVALID;
    5267      207770 :             goto l5;
    5268             :         }
    5269             : 
    5270             :         /* otherwise, just fall back to creating a new multixact */
    5271        2034 :         new_status = get_mxact_status_for_lock(mode, is_update);
    5272        2034 :         new_xmax = MultiXactIdCreate(xmax, old_status,
    5273             :                                      add_to_xmax, new_status);
    5274        2034 :         GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
    5275             :     }
    5276          28 :     else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
    5277          10 :              TransactionIdDidCommit(xmax))
    5278           2 :     {
    5279             :         /*
    5280             :          * It's a committed update, so we need to preserve it as the updater
    5281             :          * of the tuple.
    5282             :          */
    5283             :         MultiXactStatus status;
    5284             :         MultiXactStatus new_status;
    5285             : 
    5286           2 :         if (old_infomask2 & HEAP_KEYS_UPDATED)
    5287           0 :             status = MultiXactStatusUpdate;
    5288             :         else
    5289           2 :             status = MultiXactStatusNoKeyUpdate;
    5290             : 
    5291           2 :         new_status = get_mxact_status_for_lock(mode, is_update);
    5292             : 
    5293             :         /*
    5294             :          * Since it's not running, it's obviously impossible for the old
    5295             :          * updater to be identical to the current one, so we need not check
    5296             :          * for that case as we do in the block above.
    5297             :          */
    5298           2 :         new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
    5299           2 :         GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
    5300             :     }
    5301             :     else
    5302             :     {
    5303             :         /*
    5304             :          * Can get here iff the locking/updating transaction was running when
    5305             :          * the infomask was extracted from the tuple, but finished before
    5306             :          * TransactionIdIsInProgress got to run.  Deal with it as if there was
    5307             :          * no locker at all in the first place.
    5308             :          */
    5309          16 :         old_infomask |= HEAP_XMAX_INVALID;
    5310          16 :         goto l5;
    5311             :     }
    5312             : 
    5313     3768548 :     *result_infomask = new_infomask;
    5314     3768548 :     *result_infomask2 = new_infomask2;
    5315     3768548 :     *result_xmax = new_xmax;
    5316     3768548 : }
    5317             : 
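/*
 * Editor's illustration -- hypothetical call, not part of heapam.c.  The
 * simplest path through the function: no prior locker (old xmax invalid) and
 * a pure lock, not an update.  Following the HEAP_XMAX_INVALID branch above,
 * the result is simply our own XID plus the matching lock-only infomask bits.
 */
    TransactionId xid = GetCurrentTransactionId();
    TransactionId new_xmax;
    uint16      new_infomask;
    uint16      new_infomask2;

    compute_new_xmax_infomask(InvalidTransactionId, HEAP_XMAX_INVALID, 0,
                              xid, LockTupleExclusive, false /* is_update */ ,
                              &new_xmax, &new_infomask, &new_infomask2);

    /*
     * Per the switch above: new_xmax == xid, new_infomask has
     * HEAP_XMAX_LOCK_ONLY | HEAP_XMAX_EXCL_LOCK set, and new_infomask2 has
     * HEAP_KEYS_UPDATED set (LockTupleExclusive also locks the key columns).
     */
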
    5318             : /*
    5319             :  * Subroutine for heap_lock_updated_tuple_rec.
    5320             :  *
    5321             :  * Given a hypothetical multixact status held by the transaction identified
    5322             :  * with the given xid, does the current transaction need to wait, fail, or can
    5323             :  * it continue if it wanted to acquire a lock of the given mode?  "needwait"
    5324             :  * is set to true if waiting is necessary; if it can continue, then TM_Ok is
    5325             :  * returned.  If the lock is already held by the current transaction, return
    5326             :  * TM_SelfModified.  In case of a conflict with another transaction, a
    5327             :  * different HeapTupleSatisfiesUpdate return code is returned.
    5328             :  *
    5329             :  * The held status is said to be hypothetical because it might correspond to a
    5330             :  * lock held by a single Xid, i.e. not a real MultiXactId; we express it this
    5331             :  * way for simplicity of API.
    5332             :  */
    5333             : static TM_Result
    5334          64 : test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
    5335             :                            LockTupleMode mode, HeapTuple tup,
    5336             :                            bool *needwait)
    5337             : {
    5338             :     MultiXactStatus wantedstatus;
    5339             : 
    5340          64 :     *needwait = false;
    5341          64 :     wantedstatus = get_mxact_status_for_lock(mode, false);
    5342             : 
    5343             :     /*
    5344             :      * Note: we *must* check TransactionIdIsInProgress before
    5345             :      * TransactionIdDidAbort/Commit; see comment at top of heapam_visibility.c
    5346             :      * for an explanation.
    5347             :      */
    5348          64 :     if (TransactionIdIsCurrentTransactionId(xid))
    5349             :     {
    5350             :         /*
    5351             :          * The tuple has already been locked by our own transaction.  This is
    5352             :          * very rare but can happen if multiple transactions are trying to
    5353             :          * lock an ancient version of the same tuple.
    5354             :          */
    5355           0 :         return TM_SelfModified;
    5356             :     }
    5357          64 :     else if (TransactionIdIsInProgress(xid))
    5358             :     {
    5359             :         /*
    5360             :          * If the locking transaction is running, what we do depends on
    5361             :          * whether the lock modes conflict: if they do, then we must wait for
    5362             :          * it to finish; otherwise we can fall through to lock this tuple
    5363             :          * version without waiting.
    5364             :          */
    5365          32 :         if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
    5366          32 :                                 LOCKMODE_from_mxstatus(wantedstatus)))
    5367             :         {
    5368          16 :             *needwait = true;
    5369             :         }
    5370             : 
    5371             :         /*
    5372             :          * If we set needwait above, then this value doesn't matter;
    5373             :          * otherwise, this value signals to caller that it's okay to proceed.
    5374             :          */
    5375          32 :         return TM_Ok;
    5376             :     }
    5377          32 :     else if (TransactionIdDidAbort(xid))
    5378           6 :         return TM_Ok;
    5379          26 :     else if (TransactionIdDidCommit(xid))
    5380             :     {
    5381             :         /*
    5382             :          * The other transaction committed.  If it was only a locker, then the
    5383             :          * lock is completely gone now and we can return success; but if it
    5384             :          * was an update, then what we do depends on whether the two lock
    5385             :          * modes conflict.  If they conflict, then we must report error to
    5386             :          * modes conflict.  If they conflict, then we must report the failure
    5387             :          * to the caller.  But if they don't, we can fall through to allow the
    5388             :          * current transaction to lock the tuple.
    5389             :          * Note: the reason we worry about ISUPDATE here is that as soon as a
    5390             :          * transaction ends, all its locks are gone and meaningless, and thus
    5391             :          * we can ignore them; whereas its updates persist.  In the
    5392             :          * TransactionIdIsInProgress case, above, we don't need to check
    5393             :          * because we know the lock is still "alive" and thus a conflict must
    5394             :          * always be checked.
    5395             :          */
    5396          26 :         if (!ISUPDATE_from_mxstatus(status))
    5397           8 :             return TM_Ok;
    5398             : 
    5399          18 :         if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
    5400          18 :                                 LOCKMODE_from_mxstatus(wantedstatus)))
    5401             :         {
    5402             :             /* bummer */
    5403             :             /* conflicting committed update: report it to the caller */
    5404          12 :                 return TM_Updated;
    5405             :             else
    5406           4 :                 return TM_Deleted;
    5407             :         }
    5408             : 
    5409           2 :         return TM_Ok;
    5410             :     }
    5411             : 
    5412             :     /* Not in progress, not aborted, not committed -- must have crashed */
    5413           0 :     return TM_Ok;
    5414             : }
    5415             : 
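/*
 * Editor's illustration -- hedged sketch, not part of heapam.c.  Concrete
 * outcomes of test_lockmode_for_conflict for an existing key-share locker
 * that is still in progress; "locker_xid" and "mytup" are assumed to come
 * from the surrounding context.  The results follow the row-lock conflict
 * table consulted via DoLockModesConflict.
 */
    bool        needwait;
    TM_Result   res;

    /* key-share vs. no-key-exclusive do not conflict: TM_Ok, needwait false */
    res = test_lockmode_for_conflict(MultiXactStatusForKeyShare, locker_xid,
                                     LockTupleNoKeyExclusive, &mytup,
                                     &needwait);

    /* key-share vs. exclusive do conflict: still TM_Ok, but needwait true */
    res = test_lockmode_for_conflict(MultiXactStatusForKeyShare, locker_xid,
                                     LockTupleExclusive, &mytup, &needwait);
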
    5416             : 
    5417             : /*
    5418             :  * Recursive part of heap_lock_updated_tuple
    5419             :  *
    5420             :  * Fetch the tuple pointed to by tid in rel, and mark it as locked by the given
    5421             :  * xid with the given mode; if this tuple is updated, recurse to lock the new
    5422             :  * version as well.
    5423             :  */
    5424             : static TM_Result
    5425         160 : heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
    5426             :                             LockTupleMode mode)
    5427             : {
    5428             :     TM_Result   result;
    5429             :     ItemPointerData tupid;
    5430             :     HeapTupleData mytup;
    5431             :     Buffer      buf;
    5432             :     uint16      new_infomask,
    5433             :                 new_infomask2,
    5434             :                 old_infomask,
    5435             :                 old_infomask2;
    5436             :     TransactionId xmax,
    5437             :                 new_xmax;
    5438         160 :     TransactionId priorXmax = InvalidTransactionId;
    5439         160 :     bool        cleared_all_frozen = false;
    5440             :     bool        pinned_desired_page;
    5441         160 :     Buffer      vmbuffer = InvalidBuffer;
    5442             :     BlockNumber block;
    5443             : 
    5444         160 :     ItemPointerCopy(tid, &tupid);
    5445             : 
    5446             :     for (;;)
    5447             :     {
    5448         166 :         new_infomask = 0;
    5449         166 :         new_xmax = InvalidTransactionId;
    5450         166 :         block = ItemPointerGetBlockNumber(&tupid);
    5451         166 :         ItemPointerCopy(&tupid, &(mytup.t_self));
    5452             : 
    5453         166 :         if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false))
    5454             :         {
    5455             :             /*
    5456             :              * If we fail to find the updated version of the tuple, it's
    5457             :              * because it was vacuumed/pruned away after its creator
    5458             :              * transaction aborted.  So behave as if we got to the end of the
    5459             :              * chain, and there's no further tuple to lock: return success to
    5460             :              * caller.
    5461             :              */
    5462           0 :             result = TM_Ok;
    5463           0 :             goto out_unlocked;
    5464             :         }
    5465             : 
    5466         166 : l4:
    5467         182 :         CHECK_FOR_INTERRUPTS();
    5468             : 
    5469             :         /*
    5470             :          * Before locking the buffer, pin the visibility map page if it
    5471             :          * appears to be necessary.  Since we haven't got the lock yet,
    5472             :          * someone else might be in the middle of changing this, so we'll need
    5473             :          * to recheck after we have the lock.
    5474             :          */
    5475         182 :         if (PageIsAllVisible(BufferGetPage(buf)))
    5476             :         {
    5477           0 :             visibilitymap_pin(rel, block, &vmbuffer);
    5478           0 :             pinned_desired_page = true;
    5479             :         }
    5480             :         else
    5481         182 :             pinned_desired_page = false;
    5482             : 
    5483         182 :         LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
    5484             : 
    5485             :         /*
    5486             :          * If we didn't pin the visibility map page and the page has become
    5487             :          * all visible while we were busy locking the buffer, we'll have to
    5488             :          * unlock and re-lock, to avoid holding the buffer lock across I/O.
    5489             :          * That's a bit unfortunate, but hopefully shouldn't happen often.
    5490             :          *
    5491             :          * Note: in some paths through this function, we will reach here
    5492             :          * holding a pin on a vm page that may or may not be the one matching
    5493             :          * this page.  If this page isn't all-visible, we won't use the vm
    5494             :          * page, but we hold onto such a pin till the end of the function.
    5495             :          */
    5496         182 :         if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
    5497             :         {
    5498           0 :             LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    5499           0 :             visibilitymap_pin(rel, block, &vmbuffer);
    5500           0 :             LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
    5501             :         }
    5502             : 
    5503             :         /*
    5504             :          * Check the tuple XMIN against prior XMAX, if any.  If we reached the
    5505             :          * end of the chain, we're done, so return success.
    5506             :          */
    5507         188 :         if (TransactionIdIsValid(priorXmax) &&
    5508           6 :             !TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data),
    5509             :                                  priorXmax))
    5510             :         {
    5511           0 :             result = TM_Ok;
    5512           0 :             goto out_locked;
    5513             :         }
    5514             : 
    5515             :         /*
    5516             :          * Also check Xmin: if this tuple was created by an aborted
    5517             :          * (sub)transaction, then we already locked the last live one in the
    5518             :          * chain, thus we're done, so return success.
    5519             :          */
    5520         182 :         if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
    5521             :         {
    5522          26 :             result = TM_Ok;
    5523          26 :             goto out_locked;
    5524             :         }
    5525             : 
    5526         156 :         old_infomask = mytup.t_data->t_infomask;
    5527         156 :         old_infomask2 = mytup.t_data->t_infomask2;
    5528         156 :         xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
    5529             : 
    5530             :         /*
    5531             :          * If this tuple version has been updated or locked by some concurrent
    5532             :          * transaction(s), what we do depends on whether our lock mode
    5533             :          * conflicts with what those other transactions hold, and also on the
    5534             :          * status of them.
    5535             :          */
    5536         156 :         if (!(old_infomask & HEAP_XMAX_INVALID))
    5537             :         {
    5538             :             TransactionId rawxmax;
    5539             :             bool        needwait;
    5540             : 
    5541          60 :             rawxmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
    5542          60 :             if (old_infomask & HEAP_XMAX_IS_MULTI)
    5543             :             {
    5544             :                 int         nmembers;
    5545             :                 int         i;
    5546             :                 MultiXactMember *members;
    5547             : 
    5548             :                 /*
    5549             :                  * We don't need a test for pg_upgrade'd tuples: this is only
    5550             :                  * applied to tuples after the first in an update chain.  Said
    5551             :                  * first tuple in the chain may well be locked-in-9.2-and-
    5552             :                  * pg_upgraded, but that one was already locked by our caller,
    5553             :                  * not us; and any subsequent ones cannot be because our
    5554             :                  * caller must necessarily have obtained a snapshot later than
    5555             :                  * the pg_upgrade itself.
    5556             :                  */
    5557             :                 Assert(!HEAP_LOCKED_UPGRADED(mytup.t_data->t_infomask));
    5558             : 
    5559           2 :                 nmembers = GetMultiXactIdMembers(rawxmax, &members, false,
    5560           2 :                                                  HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
    5561           8 :                 for (i = 0; i < nmembers; i++)
    5562             :                 {
    5563           6 :                     result = test_lockmode_for_conflict(members[i].status,
    5564           6 :                                                         members[i].xid,
    5565             :                                                         mode,
    5566             :                                                         &mytup,
    5567             :                                                         &needwait);
    5568             : 
    5569             :                     /*
    5570             :                      * If the tuple was already locked by ourselves in a
    5571             :                      * previous iteration of this (say heap_lock_tuple was
    5572             :                      * forced to restart the locking loop because of a change
    5573             :                      * in xmax), then we hold the lock already on this tuple
    5574             :                      * version and we don't need to do anything; and this is
    5575             :                      * not an error condition either.  We just need to skip
    5576             :                      * this tuple and continue locking the next version in the
    5577             :                      * update chain.
    5578             :                      */
    5579           6 :                     if (result == TM_SelfModified)
    5580             :                     {
    5581           0 :                         pfree(members);
    5582           0 :                         goto next;
    5583             :                     }
    5584             : 
    5585           6 :                     if (needwait)
    5586             :                     {
    5587           0 :                         LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    5588           0 :                         XactLockTableWait(members[i].xid, rel,
    5589             :                                           &mytup.t_self,
    5590             :                                           XLTW_LockUpdated);
    5591           0 :                         pfree(members);
    5592           0 :                         goto l4;
    5593             :                     }
    5594           6 :                     if (result != TM_Ok)
    5595             :                     {
    5596           0 :                         pfree(members);
    5597           0 :                         goto out_locked;
    5598             :                     }
    5599             :                 }
    5600           2 :                 if (members)
    5601           2 :                     pfree(members);
    5602             :             }
    5603             :             else
    5604             :             {
    5605             :                 MultiXactStatus status;
    5606             : 
    5607             :                 /*
    5608             :                  * For a non-multi Xmax, we first need to compute the
    5609             :                  * corresponding MultiXactStatus by using the infomask bits.
    5610             :                  */
    5611          58 :                 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
    5612             :                 {
    5613          20 :                     if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
    5614          20 :                         status = MultiXactStatusForKeyShare;
    5615           0 :                     else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
    5616           0 :                         status = MultiXactStatusForShare;
    5617           0 :                     else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
    5618             :                     {
    5619           0 :                         if (old_infomask2 & HEAP_KEYS_UPDATED)
    5620           0 :                             status = MultiXactStatusForUpdate;
    5621             :                         else
    5622           0 :                             status = MultiXactStatusForNoKeyUpdate;
    5623             :                     }
    5624             :                     else
    5625             :                     {
    5626             :                         /*
    5627             :                          * LOCK_ONLY present alone (a pg_upgraded tuple marked
    5628             :                          * as share-locked in the old cluster) shouldn't be
    5629             :                          * seen in the middle of an update chain.
    5630             :                          */
    5631           0 :                         elog(ERROR, "invalid lock status in tuple");
    5632             :                     }
    5633             :                 }
    5634             :                 else
    5635             :                 {
    5636             :                     /* it's an update, but which kind? */
    5637          38 :                     if (old_infomask2 & HEAP_KEYS_UPDATED)
    5638          28 :                         status = MultiXactStatusUpdate;
    5639             :                     else
    5640          10 :                         status = MultiXactStatusNoKeyUpdate;
    5641             :                 }
    5642             : 
    5643          58 :                 result = test_lockmode_for_conflict(status, rawxmax, mode,
    5644             :                                                     &mytup, &needwait);
    5645             : 
    5646             :                 /*
    5647             :                  * If the tuple was already locked by ourselves in a previous
    5648             :                  * iteration of this routine (say heap_lock_tuple was forced to
    5649             :                  * restart the locking loop because of a change in xmax), then
    5650             :                  * we hold the lock already on this tuple version and we don't
    5651             :                  * need to do anything; and this is not an error condition
    5652             :                  * either.  We just need to skip this tuple and continue
    5653             :                  * locking the next version in the update chain.
    5654             :                  */
    5655          58 :                 if (result == TM_SelfModified)
    5656           0 :                     goto next;
    5657             : 
    5658          58 :                 if (needwait)
    5659             :                 {
    5660          16 :                     LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    5661          16 :                     XactLockTableWait(rawxmax, rel, &mytup.t_self,
    5662             :                                       XLTW_LockUpdated);
    5663          16 :                     goto l4;
    5664             :                 }
    5665          42 :                 if (result != TM_Ok)
    5666             :                 {
    5667          16 :                     goto out_locked;
    5668             :                 }
    5669             :             }
    5670             :         }
    5671             : 
    5672             :         /* compute the new Xmax and infomask values for the tuple ... */
    5673         124 :         compute_new_xmax_infomask(xmax, old_infomask, mytup.t_data->t_infomask2,
    5674             :                                   xid, mode, false,
    5675             :                                   &new_xmax, &new_infomask, &new_infomask2);
    5676             : 
    5677         124 :         if (PageIsAllVisible(BufferGetPage(buf)) &&
    5678           0 :             visibilitymap_clear(rel, block, vmbuffer,
    5679             :                                 VISIBILITYMAP_ALL_FROZEN))
    5680           0 :             cleared_all_frozen = true;
    5681             : 
    5682         124 :         START_CRIT_SECTION();
    5683             : 
    5684             :         /* ... and set them */
    5685         124 :         HeapTupleHeaderSetXmax(mytup.t_data, new_xmax);
    5686         124 :         mytup.t_data->t_infomask &= ~HEAP_XMAX_BITS;
    5687         124 :         mytup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    5688         124 :         mytup.t_data->t_infomask |= new_infomask;
    5689         124 :         mytup.t_data->t_infomask2 |= new_infomask2;
    5690             : 
    5691         124 :         MarkBufferDirty(buf);
    5692             : 
    5693             :         /* XLOG stuff */
    5694         124 :         if (RelationNeedsWAL(rel))
    5695             :         {
    5696             :             xl_heap_lock_updated xlrec;
    5697             :             XLogRecPtr  recptr;
    5698         124 :             Page        page = BufferGetPage(buf);
    5699             : 
    5700         124 :             XLogBeginInsert();
    5701         124 :             XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
    5702             : 
    5703         124 :             xlrec.offnum = ItemPointerGetOffsetNumber(&mytup.t_self);
    5704         124 :             xlrec.xmax = new_xmax;
    5705         124 :             xlrec.infobits_set = compute_infobits(new_infomask, new_infomask2);
    5706         124 :             xlrec.flags =
    5707         124 :                 cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
    5708             : 
    5709         124 :             XLogRegisterData((char *) &xlrec, SizeOfHeapLockUpdated);
    5710             : 
    5711         124 :             recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOCK_UPDATED);
    5712             : 
    5713         124 :             PageSetLSN(page, recptr);
    5714             :         }
    5715             : 
    5716         124 :         END_CRIT_SECTION();
    5717             : 
    5718         124 : next:
    5719             :         /* if we find the end of update chain, we're done. */
    5720         248 :         if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
    5721         248 :             HeapTupleHeaderIndicatesMovedPartitions(mytup.t_data) ||
    5722         132 :             ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
    5723           8 :             HeapTupleHeaderIsOnlyLocked(mytup.t_data))
    5724             :         {
    5725         118 :             result = TM_Ok;
    5726         118 :             goto out_locked;
    5727             :         }
    5728             : 
    5729             :         /* tail recursion */
    5730           6 :         priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
    5731           6 :         ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
    5732           6 :         UnlockReleaseBuffer(buf);
    5733             :     }
    5734             : 
    5735             :     result = TM_Ok;
    5736             : 
    5737         160 : out_locked:
    5738         160 :     UnlockReleaseBuffer(buf);
    5739             : 
    5740         160 : out_unlocked:
    5741         160 :     if (vmbuffer != InvalidBuffer)
    5742           0 :         ReleaseBuffer(vmbuffer);
    5743             : 
    5744         160 :     return result;
    5745             : }
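
/*
 * [Editor's illustrative sketch -- not part of heapam.c or of the coverage
 * data above.]  Skeleton of the update-chain walk performed by
 * heap_lock_updated_tuple_rec, with the xmin/priorXmax linkage check, lock
 * conflict handling, infomask bookkeeping and WAL logging elided; it is meant
 * only to show the control flow, not the exact code.
 */
for (;;)
{
    ItemPointerCopy(&tupid, &mytup.t_self);             /* next version to lock */
    if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false))
        break;                                          /* tuple vanished; done */
    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

    /* ... verify chain linkage, resolve lock conflicts, install the new xmax ... */

    if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
        ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid))
        break;                                          /* end of update chain */

    priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
    ItemPointerCopy(&mytup.t_data->t_ctid, &tupid);     /* follow t_ctid */
    UnlockReleaseBuffer(buf);
}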
    5746             : 
    5747             : /*
    5748             :  * heap_lock_updated_tuple
    5749             :  *      Follow update chain when locking an updated tuple, acquiring locks (row
    5750             :  *      marks) on the updated versions.
    5751             :  *
    5752             :  * The initial tuple is assumed to be already locked.
    5753             :  *
    5754             :  * This function doesn't check visibility, it just unconditionally marks the
    5755             :  * tuple(s) as locked.  If any tuple in the updated chain is being deleted
    5756             :  * concurrently (or updated with the key being modified), sleep until the
    5757             :  * transaction doing it is finished.
    5758             :  *
    5759             :  * Note that we don't acquire heavyweight tuple locks on the tuples we walk
    5760             :  * when we have to wait for other transactions to release them, as opposed to
    5761             :  * what heap_lock_tuple does.  The reason is that having more than one
    5762             :  * transaction walking the chain is probably uncommon enough that the risk of
    5763             :  * starvation is not likely: one of the preconditions for being here is that
    5764             :  * the snapshot in use predates the update that created this tuple (because we
    5765             :  * started at an earlier version of the tuple), but at the same time such a
    5766             :  * transaction cannot be using repeatable read or serializable isolation
    5767             :  * levels, because that would lead to a serializability failure.
    5768             :  */
    5769             : static TM_Result
    5770         176 : heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
    5771             :                         TransactionId xid, LockTupleMode mode)
    5772             : {
    5773             :     /*
    5774             :      * If the tuple has not been updated, or has moved into another partition
    5775             :      * (effectively a delete), stop here.
    5776             :      */
    5777         176 :     if (!HeapTupleHeaderIndicatesMovedPartitions(tuple->t_data) &&
    5778         172 :         !ItemPointerEquals(&tuple->t_self, ctid))
    5779             :     {
    5780             :         /*
    5781             :          * If this is the first possibly-multixact-able operation in the
    5782             :          * current transaction, set my per-backend OldestMemberMXactId
    5783             :          * setting. We can be certain that the transaction will never become a
    5784             :          * member of any older MultiXactIds than that.  (We have to do this
    5785             :          * even if we end up just using our own TransactionId below, since
    5786             :          * some other backend could incorporate our XID into a MultiXact
    5787             :          * immediately afterwards.)
    5788             :          */
    5789         160 :         MultiXactIdSetOldestMember();
    5790             : 
    5791         160 :         return heap_lock_updated_tuple_rec(rel, ctid, xid, mode);
    5792             :     }
    5793             : 
    5794             :     /* nothing to lock */
    5795          16 :     return TM_Ok;
    5796             : }
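
/*
 * [Editor's illustrative sketch -- not part of heapam.c or of the coverage
 * data above.]  heap_lock_updated_tuple is reached from heap_lock_tuple when
 * the row being locked turns out to have been updated and the caller asked to
 * follow updates; the surrounding variable names here (follow_updates,
 * updated, t_ctid, result) are assumptions for illustration only.
 */
if (follow_updates && updated)
{
    TM_Result   res;

    res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
                                  GetCurrentTransactionId(), mode);
    if (res != TM_Ok)
    {
        result = res;
        /* ... release buffer locks and report the failure to the caller ... */
    }
}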
    5797             : 
    5798             : /*
    5799             :  *  heap_finish_speculative - mark speculative insertion as successful
    5800             :  *
    5801             :  * To successfully finish a speculative insertion we have to clear the
    5802             :  * speculative token from the tuple.  To do so, the t_ctid field, which will contain a
    5803             :  * speculative token value, is modified in place to point to the tuple itself,
    5804             :  * which is characteristic of a newly inserted ordinary tuple.
    5805             :  *
    5806             :  * NB: It is not ok to commit without either finishing or aborting a
    5807             :  * speculative insertion.  We could treat speculative tuples of committed
    5808             :  * transactions implicitly as completed, but then we would have to be prepared
    5809             :  * to deal with speculative tokens on committed tuples.  That wouldn't be
    5810             :  * difficult - no-one looks at the ctid field of a tuple with invalid xmax -
    5811             :  * but clearing the token at completion isn't very expensive either.
    5812             :  * An explicit confirmation WAL record also makes logical decoding simpler.
    5813             :  */
    5814             : void
    5815        4010 : heap_finish_speculative(Relation relation, ItemPointer tid)
    5816             : {
    5817             :     Buffer      buffer;
    5818             :     Page        page;
    5819             :     OffsetNumber offnum;
    5820        4010 :     ItemId      lp = NULL;
    5821             :     HeapTupleHeader htup;
    5822             : 
    5823        4010 :     buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
    5824        4010 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    5825        4010 :     page = (Page) BufferGetPage(buffer);
    5826             : 
    5827        4010 :     offnum = ItemPointerGetOffsetNumber(tid);
    5828        4010 :     if (PageGetMaxOffsetNumber(page) >= offnum)
    5829        4010 :         lp = PageGetItemId(page, offnum);
    5830             : 
    5831        4010 :     if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
    5832           0 :         elog(ERROR, "invalid lp");
    5833             : 
    5834        4010 :     htup = (HeapTupleHeader) PageGetItem(page, lp);
    5835             : 
    5836             :     /* NO EREPORT(ERROR) from here till changes are logged */
    5837        4010 :     START_CRIT_SECTION();
    5838             : 
    5839             :     Assert(HeapTupleHeaderIsSpeculative(htup));
    5840             : 
    5841        4010 :     MarkBufferDirty(buffer);
    5842             : 
    5843             :     /*
    5844             :      * Replace the speculative insertion token with a real t_ctid, pointing to
    5845             :      * itself like it does on regular tuples.
    5846             :      */
    5847        4010 :     htup->t_ctid = *tid;
    5848             : 
    5849             :     /* XLOG stuff */
    5850        4010 :     if (RelationNeedsWAL(relation))
    5851             :     {
    5852             :         xl_heap_confirm xlrec;
    5853             :         XLogRecPtr  recptr;
    5854             : 
    5855        3998 :         xlrec.offnum = ItemPointerGetOffsetNumber(tid);
    5856             : 
    5857        3998 :         XLogBeginInsert();
    5858             : 
    5859             :         /* We want the same filtering on this as on a plain insert */
    5860        3998 :         XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    5861             : 
    5862        3998 :         XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
    5863        3998 :         XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    5864             : 
    5865        3998 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
    5866             : 
    5867        3998 :         PageSetLSN(page, recptr);
    5868             :     }
    5869             : 
    5870        4010 :     END_CRIT_SECTION();
    5871             : 
    5872        4010 :     UnlockReleaseBuffer(buffer);
    5873        4010 : }
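
/*
 * [Editor's illustrative sketch -- not part of heapam.c or of the coverage
 * data above.]  A caller driving a speculative insertion (e.g. INSERT ...
 * ON CONFLICT) is expected to pair heap_insert() called with the
 * HEAP_INSERT_SPECULATIVE option with exactly one of heap_finish_speculative
 * or heap_abort_speculative; rel, tup, cid, bistate and conflict_found below
 * are placeholder names.
 */
heap_insert(rel, tup, cid, HEAP_INSERT_SPECULATIVE, bistate);

/* ... probe the unique indexes for a conflicting row ... */

if (conflict_found)
    heap_abort_speculative(rel, &tup->t_self);      /* kill our tuple */
else
    heap_finish_speculative(rel, &tup->t_self);     /* clear token, log confirmation */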
    5874             : 
    5875             : /*
    5876             :  *  heap_abort_speculative - kill a speculatively inserted tuple
    5877             :  *
    5878             :  * Marks a tuple that was speculatively inserted in the same command as dead,
    5879             :  * by setting its xmin as invalid.  That makes it immediately appear as dead
    5880             :  * to all transactions, including our own.  In particular, it makes
    5881             :  * HeapTupleSatisfiesDirty() regard the tuple as dead, so that another backend
    5882             :  * inserting a duplicate key value won't unnecessarily wait for our whole
    5883             :  * transaction to finish (it'll just wait for our speculative insertion to
    5884             :  * finish).
    5885             :  *
    5886             :  * Killing the tuple prevents "unprincipled deadlocks", which are deadlocks
    5887             :  * that arise due to a mutual dependency that is not user visible.  By
    5888             :  * definition, unprincipled deadlocks cannot be prevented by the user
    5889             :  * reordering lock acquisition in client code, because the implementation-level
    5890             :  * lock acquisitions are not under the user's direct control.  If speculative
    5891             :  * inserters did not take this precaution, then under high concurrency they
    5892             :  * could deadlock with each other, which would not be acceptable.
    5893             :  *
    5894             :  * This is somewhat redundant with heap_delete, but we prefer to have a
    5895             :  * dedicated routine with stripped down requirements.  Note that this is also
    5896             :  * used to delete the TOAST tuples created during speculative insertion.
    5897             :  *
    5898             :  * This routine does not affect logical decoding as it only looks at
    5899             :  * confirmation records.
    5900             :  */
    5901             : void
    5902          20 : heap_abort_speculative(Relation relation, ItemPointer tid)
    5903             : {
    5904          20 :     TransactionId xid = GetCurrentTransactionId();
    5905             :     ItemId      lp;
    5906             :     HeapTupleData tp;
    5907             :     Page        page;
    5908             :     BlockNumber block;
    5909             :     Buffer      buffer;
    5910             :     TransactionId prune_xid;
    5911             : 
    5912             :     Assert(ItemPointerIsValid(tid));
    5913             : 
    5914          20 :     block = ItemPointerGetBlockNumber(tid);
    5915          20 :     buffer = ReadBuffer(relation, block);
    5916          20 :     page = BufferGetPage(buffer);
    5917             : 
    5918          20 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    5919             : 
    5920             :     /*
    5921             :      * Page can't be all visible; we just inserted into it, and are still
    5922             :      * running.
    5923             :      */
    5924             :     Assert(!PageIsAllVisible(page));
    5925             : 
    5926          20 :     lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
    5927             :     Assert(ItemIdIsNormal(lp));
    5928             : 
    5929          20 :     tp.t_tableOid = RelationGetRelid(relation);
    5930          20 :     tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
    5931          20 :     tp.t_len = ItemIdGetLength(lp);
    5932          20 :     tp.t_self = *tid;
    5933             : 
    5934             :     /*
    5935             :      * Sanity check that the tuple really is a speculatively inserted tuple,
    5936             :      * inserted by us.
    5937             :      */
    5938          20 :     if (tp.t_data->t_choice.t_heap.t_xmin != xid)
    5939           0 :         elog(ERROR, "attempted to kill a tuple inserted by another transaction");
    5940          20 :     if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
    5941           0 :         elog(ERROR, "attempted to kill a non-speculative tuple");
    5942             :     Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
    5943             : 
    5944             :     /*
    5945             :      * No need to check for serializable conflicts here.  There is never a
    5946             :      * need for a combo CID, either.  No need to extract replica identity, or
    5947             :      * do anything special with infomask bits.
    5948             :      */
    5949             : 
    5950          20 :     START_CRIT_SECTION();
    5951             : 
    5952             :     /*
    5953             :      * The tuple will become DEAD immediately.  Flag that this page is a
    5954             :      * candidate for pruning by setting xmin to TransactionXmin. While not
    5955             :      * immediately prunable, it is the oldest xid we can cheaply determine
    5956             :      * that's safe against wraparound / being older than the table's
    5957             :      * relfrozenxid.  To defend against the unlikely case of a new relation
    5958             :      * having a newer relfrozenxid than our TransactionXmin, use relfrozenxid
    5959             :      * if so (vacuum can't subsequently move relfrozenxid to beyond
    5960             :      * TransactionXmin, so there's no race here).
    5961             :      */
    5962             :     Assert(TransactionIdIsValid(TransactionXmin));
    5963          20 :     if (TransactionIdPrecedes(TransactionXmin, relation->rd_rel->relfrozenxid))
    5964           0 :         prune_xid = relation->rd_rel->relfrozenxid;
    5965             :     else
    5966          20 :         prune_xid = TransactionXmin;
    5967          20 :     PageSetPrunable(page, prune_xid);
    5968             : 
    5969             :     /* store transaction information of xact deleting the tuple */
    5970          20 :     tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    5971          20 :     tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    5972             : 
    5973             :     /*
    5974             :      * Set the tuple header xmin to InvalidTransactionId.  This makes the
    5975             :      * tuple immediately invisible to everyone.  (In particular, to any
    5976             :      * transactions waiting on the speculative token, woken up later.)
    5977             :      */
    5978          20 :     HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
    5979             : 
    5980             :     /* Clear the speculative insertion token too */
    5981          20 :     tp.t_data->t_ctid = tp.t_self;
    5982             : 
    5983          20 :     MarkBufferDirty(buffer);
    5984             : 
    5985             :     /*
    5986             :      * XLOG stuff
    5987             :      *
    5988             :      * The WAL records generated here match heap_delete().  The same recovery
    5989             :      * routines are used.
    5990             :      */
    5991          20 :     if (RelationNeedsWAL(relation))
    5992             :     {
    5993             :         xl_heap_delete xlrec;
    5994             :         XLogRecPtr  recptr;
    5995             : 
    5996          20 :         xlrec.flags = XLH_DELETE_IS_SUPER;
    5997          40 :         xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
    5998          20 :                                               tp.t_data->t_infomask2);
    5999          20 :         xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
    6000          20 :         xlrec.xmax = xid;
    6001             : 
    6002          20 :         XLogBeginInsert();
    6003          20 :         XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
    6004          20 :         XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    6005             : 
    6006             :         /* No replica identity & replication origin logged */
    6007             : 
    6008          20 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
    6009             : 
    6010          20 :         PageSetLSN(page, recptr);
    6011             :     }
    6012             : 
    6013          20 :     END_CRIT_SECTION();
    6014             : 
    6015          20 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    6016             : 
    6017          20 :     if (HeapTupleHasExternal(&tp))
    6018             :     {
    6019             :         Assert(!IsToastRelation(relation));
    6020           2 :         heap_toast_delete(relation, &tp, true);
    6021             :     }
    6022             : 
    6023             :     /*
    6024             :      * Never need to mark tuple for invalidation, since catalogs don't support
    6025             :      * speculative insertion
    6026             :      */
    6027             : 
    6028             :     /* Now we can release the buffer */
    6029          20 :     ReleaseBuffer(buffer);
    6030             : 
    6031             :     /* count deletion, as we counted the insertion too */
    6032          20 :     pgstat_count_heap_delete(relation);
    6033          20 : }
    6034             : 
    6035             : /*
    6036             :  * heap_inplace_update - update a tuple "in place" (ie, overwrite it)
    6037             :  *
    6038             :  * Overwriting violates both MVCC and transactional safety, so the uses
    6039             :  * of this function in Postgres are extremely limited.  Nonetheless we
    6040             :  * find some places to use it.
    6041             :  *
    6042             :  * The tuple cannot change size, and therefore it's reasonable to assume
    6043             :  * that its null bitmap (if any) doesn't change either.  So we just
    6044             :  * overwrite the data portion of the tuple without touching the null
    6045             :  * bitmap or any of the header fields.
    6046             :  *
    6047             :  * tuple is an in-memory tuple structure containing the data to be written
    6048             :  * over the target tuple.  Also, tuple->t_self identifies the target tuple.
    6049             :  *
    6050             :  * Note that the tuple updated here had better not come directly from the
    6051             :  * syscache if the relation has a toast relation, as this tuple could
    6052             :  * include toast values that have been expanded, causing a failure here.
    6053             :  */
    6054             : void
    6055      107688 : heap_inplace_update(Relation relation, HeapTuple tuple)
    6056             : {
    6057             :     Buffer      buffer;
    6058             :     Page        page;
    6059             :     OffsetNumber offnum;
    6060      107688 :     ItemId      lp = NULL;
    6061             :     HeapTupleHeader htup;
    6062             :     uint32      oldlen;
    6063             :     uint32      newlen;
    6064             : 
    6065             :     /*
    6066             :      * For now, we don't allow parallel updates.  Unlike a regular update,
    6067             :      * this should never create a combo CID, so it might be possible to relax
    6068             :      * this restriction, but not without more thought and testing.  It's not
    6069             :      * clear that it would be useful, anyway.
    6070             :      */
    6071      107688 :     if (IsInParallelMode())
    6072           0 :         ereport(ERROR,
    6073             :                 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
    6074             :                  errmsg("cannot update tuples during a parallel operation")));
    6075             : 
    6076      107688 :     buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
    6077      107688 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    6078      107688 :     page = (Page) BufferGetPage(buffer);
    6079             : 
    6080      107688 :     offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
    6081      107688 :     if (PageGetMaxOffsetNumber(page) >= offnum)
    6082      107688 :         lp = PageGetItemId(page, offnum);
    6083             : 
    6084      107688 :     if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
    6085           0 :         elog(ERROR, "invalid lp");
    6086             : 
    6087      107688 :     htup = (HeapTupleHeader) PageGetItem(page, lp);
    6088             : 
    6089      107688 :     oldlen = ItemIdGetLength(lp) - htup->t_hoff;
    6090      107688 :     newlen = tuple->t_len - tuple->t_data->t_hoff;
    6091      107688 :     if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
    6092           0 :         elog(ERROR, "wrong tuple length");
    6093             : 
    6094             :     /* NO EREPORT(ERROR) from here till changes are logged */
    6095      107688 :     START_CRIT_SECTION();
    6096             : 
    6097      107688 :     memcpy((char *) htup + htup->t_hoff,
    6098      107688 :            (char *) tuple->t_data + tuple->t_data->t_hoff,
    6099             :            newlen);
    6100             : 
    6101      107688 :     MarkBufferDirty(buffer);
    6102             : 
    6103             :     /* XLOG stuff */
    6104      107688 :     if (RelationNeedsWAL(relation))
    6105             :     {
    6106             :         xl_heap_inplace xlrec;
    6107             :         XLogRecPtr  recptr;
    6108             : 
    6109      107672 :         xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
    6110             : 
    6111      107672 :         XLogBeginInsert();
    6112      107672 :         XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
    6113             : 
    6114      107672 :         XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    6115      107672 :         XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
    6116             : 
    6117             :         /* inplace updates aren't decoded atm, don't log the origin */
    6118             : 
    6119      107672 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
    6120             : 
    6121      107672 :         PageSetLSN(page, recptr);
    6122             :     }
    6123             : 
    6124      107688 :     END_CRIT_SECTION();
    6125             : 
    6126      107688 :     UnlockReleaseBuffer(buffer);
    6127             : 
    6128             :     /*
    6129             :      * Send out shared cache inval if necessary.  Note that because we only
    6130             :      * pass the new version of the tuple, this mustn't be used for any
    6131             :      * operations that could change catcache lookup keys.  But we aren't
    6132             :      * bothering with index updates either, so that's true a fortiori.
    6133             :      */
    6134      107688 :     if (!IsBootstrapProcessingMode())
    6135       84008 :         CacheInvalidateHeapTuple(relation, tuple, NULL);
    6136      107688 : }
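
/*
 * [Editor's illustrative sketch -- not part of heapam.c or of the coverage
 * data above.]  Typical in-place update pattern, roughly as used when VACUUM
 * refreshes pg_class statistics; pg_class_rel, relid and num_pages are
 * placeholder names.  Only fixed-width fields may be modified, since the
 * tuple length must not change.
 */
HeapTuple   ctup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
Form_pg_class pgcform = (Form_pg_class) GETSTRUCT(ctup);

pgcform->relpages = (int32) num_pages;      /* overwrite a fixed-width column */
heap_inplace_update(pg_class_rel, ctup);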
    6137             : 
    6138             : #define     FRM_NOOP                0x0001
    6139             : #define     FRM_INVALIDATE_XMAX     0x0002
    6140             : #define     FRM_RETURN_IS_XID       0x0004
    6141             : #define     FRM_RETURN_IS_MULTI     0x0008
    6142             : #define     FRM_MARK_COMMITTED      0x0010
    6143             : 
    6144             : /*
    6145             :  * FreezeMultiXactId
    6146             :  *      Determine what to do during freezing when a tuple is marked by a
    6147             :  *      MultiXactId.
    6148             :  *
    6149             :  * "flags" is an output value; it's used to tell caller what to do on return.
    6150             :  * "pagefrz" is an input/output value, used to manage page level freezing.
    6151             :  *
    6152             :  * Possible values that we can set in "flags":
    6153             :  * FRM_NOOP
    6154             :  *      don't do anything -- keep existing Xmax
    6155             :  * FRM_INVALIDATE_XMAX
    6156             :  *      mark Xmax as InvalidTransactionId and set XMAX_INVALID flag.
    6157             :  * FRM_RETURN_IS_XID
    6158             :  *      The Xid return value is a single update Xid to set as xmax.
    6159             :  * FRM_MARK_COMMITTED
    6160             :  *      Xmax can be marked as HEAP_XMAX_COMMITTED
    6161             :  * FRM_RETURN_IS_MULTI
    6162             :  *      The return value is a new MultiXactId to set as new Xmax.
    6163             :  *      (caller must obtain proper infomask bits using GetMultiXactIdHintBits)
    6164             :  *
    6165             :  * Caller delegates control of page freezing to us.  In practice we always
    6166             :  * force freezing of caller's page unless FRM_NOOP processing is indicated.
    6167             :  * We help caller ensure that XIDs < FreezeLimit and MXIDs < MultiXactCutoff
    6168             :  * can never be left behind.  We freely choose when and how to process each
    6169             :  * Multi, without ever violating the cutoff postconditions for freezing.
    6170             :  *
    6171             :  * It's useful to remove Multis on a proactive timeline (relative to freezing
    6172             :  * XIDs) to keep MultiXact member SLRU buffer misses to a minimum.  It can also
    6173             :  * be cheaper in the short run, for us, since we too can avoid SLRU buffer
    6174             :  * misses through eager processing.
    6175             :  *
    6176             :  * NB: Creates a _new_ MultiXactId when FRM_RETURN_IS_MULTI is set, though only
    6177             :  * when FreezeLimit and/or MultiXactCutoff cutoffs leave us with no choice.
    6178             :  * This can usually be put off, which is usually enough to avoid it altogether.
    6179             :  * Allocating new multis during VACUUM should be avoided on general principle;
    6180             :  * only VACUUM can advance relminmxid, so allocating new Multis here comes with
    6181             :  * its own special risks.
    6182             :  *
    6183             :  * NB: Caller must maintain "no freeze" NewRelfrozenXid/NewRelminMxid trackers
    6184             :  * using heap_tuple_should_freeze when we haven't forced page-level freezing.
    6185             :  *
    6186             :  * NB: Caller should avoid needlessly calling heap_tuple_should_freeze when we
    6187             :  * have already forced page-level freezing, since that might incur the same
    6188             :  * SLRU buffer misses that we specifically intended to avoid by freezing.
    6189             :  */
    6190             : static TransactionId
    6191          16 : FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
    6192             :                   const struct VacuumCutoffs *cutoffs, uint16 *flags,
    6193             :                   HeapPageFreeze *pagefrz)
    6194             : {
    6195             :     TransactionId newxmax;
    6196             :     MultiXactMember *members;
    6197             :     int         nmembers;
    6198             :     bool        need_replace;
    6199             :     int         nnewmembers;
    6200             :     MultiXactMember *newmembers;
    6201             :     bool        has_lockers;
    6202             :     TransactionId update_xid;
    6203             :     bool        update_committed;
    6204             :     TransactionId FreezePageRelfrozenXid;
    6205             : 
    6206          16 :     *flags = 0;
    6207             : 
    6208             :     /* We should only be called in Multis */
    6209             :     Assert(t_infomask & HEAP_XMAX_IS_MULTI);
    6210             : 
    6211          16 :     if (!MultiXactIdIsValid(multi) ||
    6212          16 :         HEAP_LOCKED_UPGRADED(t_infomask))
    6213             :     {
    6214           0 :         *flags |= FRM_INVALIDATE_XMAX;
    6215           0 :         pagefrz->freeze_required = true;
    6216           0 :         return InvalidTransactionId;
    6217             :     }
    6218          16 :     else if (MultiXactIdPrecedes(multi, cutoffs->relminmxid))
    6219           0 :         ereport(ERROR,
    6220             :                 (errcode(ERRCODE_DATA_CORRUPTED),
    6221             :                  errmsg_internal("found multixact %u from before relminmxid %u",
    6222             :                                  multi, cutoffs->relminmxid)));
    6223          16 :     else if (MultiXactIdPrecedes(multi, cutoffs->OldestMxact))
    6224             :     {
    6225             :         TransactionId update_xact;
    6226             : 
    6227             :         /*
    6228             :          * This old multi cannot possibly have members still running, but
    6229             :          * verify just in case.  If it was a locker only, it can be removed
    6230             :          * without any further consideration; but if it contained an update,
    6231             :          * we might need to preserve it.
    6232             :          */
    6233          12 :         if (MultiXactIdIsRunning(multi,
    6234          12 :                                  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
    6235           0 :             ereport(ERROR,
    6236             :                     (errcode(ERRCODE_DATA_CORRUPTED),
    6237             :                      errmsg_internal("multixact %u from before multi freeze cutoff %u found to be still running",
    6238             :                                      multi, cutoffs->OldestMxact)));
    6239             : 
    6240          12 :         if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
    6241             :         {
    6242          12 :             *flags |= FRM_INVALIDATE_XMAX;
    6243          12 :             pagefrz->freeze_required = true;
    6244          12 :             return InvalidTransactionId;
    6245             :         }
    6246             : 
    6247             :         /* replace multi with single XID for its updater? */
    6248           0 :         update_xact = MultiXactIdGetUpdateXid(multi, t_infomask);
    6249           0 :         if (TransactionIdPrecedes(update_xact, cutoffs->relfrozenxid))
    6250           0 :             ereport(ERROR,
    6251             :                     (errcode(ERRCODE_DATA_CORRUPTED),
    6252             :                      errmsg_internal("multixact %u contains update XID %u from before relfrozenxid %u",
    6253             :                                      multi, update_xact,
    6254             :                                      cutoffs->relfrozenxid)));
    6255           0 :         else if (TransactionIdPrecedes(update_xact, cutoffs->OldestXmin))
    6256             :         {
    6257             :             /*
    6258             :              * Updater XID has to have aborted (otherwise the tuple would have
    6259             :              * been pruned away instead, since updater XID is < OldestXmin).
    6260             :              * Just remove xmax.
    6261             :              */
    6262           0 :             if (TransactionIdDidCommit(update_xact))
    6263           0 :                 ereport(ERROR,
    6264             :                         (errcode(ERRCODE_DATA_CORRUPTED),
    6265             :                          errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
    6266             :                                          multi, update_xact,
    6267             :                                          cutoffs->OldestXmin)));
    6268           0 :             *flags |= FRM_INVALIDATE_XMAX;
    6269           0 :             pagefrz->freeze_required = true;
    6270           0 :             return InvalidTransactionId;
    6271             :         }
    6272             : 
    6273             :         /* Have to keep updater XID as new xmax */
    6274           0 :         *flags |= FRM_RETURN_IS_XID;
    6275           0 :         pagefrz->freeze_required = true;
    6276           0 :         return update_xact;
    6277             :     }
    6278             : 
    6279             :     /*
    6280             :      * Some member(s) of this Multi may be below FreezeLimit xid cutoff, so we
    6281             :      * need to walk the whole members array to figure out what to do, if
    6282             :      * anything.
    6283             :      */
    6284             :     nmembers =
    6285           4 :         GetMultiXactIdMembers(multi, &members, false,
    6286           4 :                               HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
    6287           4 :     if (nmembers <= 0)
    6288             :     {
    6289             :         /* Nothing worth keeping */
    6290           0 :         *flags |= FRM_INVALIDATE_XMAX;
    6291           0 :         pagefrz->freeze_required = true;
    6292           0 :         return InvalidTransactionId;
    6293             :     }
    6294             : 
    6295             :     /*
    6296             :      * The FRM_NOOP case is the only case where we might need to ratchet back
    6297             :      * FreezePageRelfrozenXid or FreezePageRelminMxid.  It is also the only
    6298             :      * case where our caller might ratchet back its NoFreezePageRelfrozenXid
    6299             :      * or NoFreezePageRelminMxid "no freeze" trackers to deal with a multi.
    6300             :      * FRM_NOOP handling should result in the NewRelfrozenXid/NewRelminMxid
    6301             :      * trackers managed by VACUUM being ratcheting back by xmax to the degree
    6302             :      * trackers managed by VACUUM being ratcheted back by xmax to the degree
    6303             :      * whether or not page freezing is triggered somewhere else.
    6304             :      *
    6305             :      * Our policy is to force freezing in every case other than FRM_NOOP,
    6306             :      * which obviates the need to maintain either set of trackers, anywhere.
    6307             :      * Every other case will reliably execute a freeze plan for xmax that
    6308             :      * either replaces xmax with an XID/MXID >= OldestXmin/OldestMxact, or
    6309             :      * sets xmax to an InvalidTransactionId XID, rendering xmax fully frozen.
    6310             :      * (VACUUM's NewRelfrozenXid/NewRelminMxid trackers are initialized with
    6311             :      * OldestXmin/OldestMxact, so later values never need to be tracked here.)
    6312             :      */
    6313           4 :     need_replace = false;
    6314           4 :     FreezePageRelfrozenXid = pagefrz->FreezePageRelfrozenXid;
    6315           8 :     for (int i = 0; i < nmembers; i++)
    6316             :     {
    6317           6 :         TransactionId xid = members[i].xid;
    6318             : 
    6319             :         Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
    6320             : 
    6321           6 :         if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
    6322             :         {
    6323             :             /* Can't violate the FreezeLimit postcondition */
    6324           2 :             need_replace = true;
    6325           2 :             break;
    6326             :         }
    6327           4 :         if (TransactionIdPrecedes(xid, FreezePageRelfrozenXid))
    6328           0 :             FreezePageRelfrozenXid = xid;
    6329             :     }
    6330             : 
    6331             :     /* Can't violate the MultiXactCutoff postcondition, either */
    6332           4 :     if (!need_replace)
    6333           2 :         need_replace = MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff);
    6334             : 
    6335           4 :     if (!need_replace)
    6336             :     {
    6337             :         /*
    6338             :          * vacuumlazy.c might ratchet back NewRelminMxid, NewRelfrozenXid, or
    6339             :          * both together to make it safe to retain this particular multi after
    6340             :          * freezing its page
    6341             :          */
    6342           2 :         *flags |= FRM_NOOP;
    6343           2 :         pagefrz->FreezePageRelfrozenXid = FreezePageRelfrozenXid;
    6344           2 :         if (MultiXactIdPrecedes(multi, pagefrz->FreezePageRelminMxid))
    6345           0 :             pagefrz->FreezePageRelminMxid = multi;
    6346           2 :         pfree(members);
    6347           2 :         return multi;
    6348             :     }
    6349             : 
    6350             :     /*
    6351             :      * Do a more thorough second pass over the multi to figure out which
    6352             :      * member XIDs actually need to be kept.  Checking the precise status of
    6353             :      * individual members might even show that we don't need to keep anything.
    6354             :      * That is quite possible even though the Multi must be >= OldestMxact,
    6355             :      * since our second pass only keeps member XIDs when it's truly necessary;
    6356             :      * even member XIDs >= OldestXmin often won't be kept by the second pass.
    6357             :      */
    6358           2 :     nnewmembers = 0;
    6359           2 :     newmembers = palloc(sizeof(MultiXactMember) * nmembers);
    6360           2 :     has_lockers = false;
    6361           2 :     update_xid = InvalidTransactionId;
    6362           2 :     update_committed = false;
    6363             : 
    6364             :     /*
    6365             :      * Determine whether to keep each member xid, or to ignore it instead
    6366             :      */
    6367           6 :     for (int i = 0; i < nmembers; i++)
    6368             :     {
    6369           4 :         TransactionId xid = members[i].xid;
    6370           4 :         MultiXactStatus mstatus = members[i].status;
    6371             : 
    6372             :         Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
    6373             : 
    6374           4 :         if (!ISUPDATE_from_mxstatus(mstatus))
    6375             :         {
    6376             :             /*
    6377             :              * Locker XID (not updater XID).  We only keep lockers that are
    6378             :              * still running.
    6379             :              */
    6380           8 :             if (TransactionIdIsCurrentTransactionId(xid) ||
    6381           4 :                 TransactionIdIsInProgress(xid))
    6382             :             {
    6383           2 :                 if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
    6384           0 :                     ereport(ERROR,
    6385             :                             (errcode(ERRCODE_DATA_CORRUPTED),
    6386             :                              errmsg_internal("multixact %u contains running locker XID %u from before removable cutoff %u",
    6387             :                                              multi, xid,
    6388             :                                              cutoffs->OldestXmin)));
    6389           2 :                 newmembers[nnewmembers++] = members[i];
    6390           2 :                 has_lockers = true;
    6391             :             }
    6392             : 
    6393           4 :             continue;
    6394             :         }
    6395             : 
    6396             :         /*
    6397             :          * Updater XID (not locker XID).  Should we keep it?
    6398             :          *
    6399             :          * Since the tuple wasn't totally removed when vacuum pruned, the
    6400             :          * update Xid cannot possibly be older than the OldestXmin cutoff unless
    6401             :          * the updater XID aborted.  If the updater transaction is known
    6402             :          * aborted or crashed then it's okay to ignore it, otherwise not.
    6403             :          *
    6404             :          * In any case the Multi should never contain two updaters, whatever
    6405             :          * their individual commit status.  Check for that first, in passing.
    6406             :          */
    6407           0 :         if (TransactionIdIsValid(update_xid))
    6408           0 :             ereport(ERROR,
    6409             :                     (errcode(ERRCODE_DATA_CORRUPTED),
    6410             :                      errmsg_internal("multixact %u has two or more updating members",
    6411             :                                      multi),
    6412             :                      errdetail_internal("First updater XID=%u second updater XID=%u.",
    6413             :                                         update_xid, xid)));
    6414             : 
    6415             :         /*
    6416             :          * As with all tuple visibility routines, it's critical to test
    6417             :          * TransactionIdIsInProgress before TransactionIdDidCommit, because of
    6418             :          * race conditions explained in detail in heapam_visibility.c.
    6419             :          */
    6420           0 :         if (TransactionIdIsCurrentTransactionId(xid) ||
    6421           0 :             TransactionIdIsInProgress(xid))
    6422           0 :             update_xid = xid;
    6423           0 :         else if (TransactionIdDidCommit(xid))
    6424             :         {
    6425             :             /*
    6426             :              * The transaction committed, so we can tell caller to set
    6427             :              * HEAP_XMAX_COMMITTED.  (We can only do this because we know the
    6428             :              * transaction is not running.)
    6429             :              */
    6430           0 :             update_committed = true;
    6431           0 :             update_xid = xid;
    6432             :         }
    6433             :         else
    6434             :         {
    6435             :             /*
    6436             :              * Not in progress, not committed -- must be aborted or crashed;
    6437             :              * we can ignore it.
    6438             :              */
    6439           0 :             continue;
    6440             :         }
    6441             : 
    6442             :         /*
    6443             :          * We determined that updater must be kept -- add it to pending new
    6444             :          * members list
    6445             :          */
    6446           0 :         if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
    6447           0 :             ereport(ERROR,
    6448             :                     (errcode(ERRCODE_DATA_CORRUPTED),
    6449             :                      errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
    6450             :                                      multi, xid, cutoffs->OldestXmin)));
    6451           0 :         newmembers[nnewmembers++] = members[i];
    6452             :     }
    6453             : 
    6454           2 :     pfree(members);
    6455             : 
    6456             :     /*
    6457             :      * Determine what to do with caller's multi based on information gathered
    6458             :      * during our second pass
    6459             :      */
    6460           2 :     if (nnewmembers == 0)
    6461             :     {
    6462             :         /* Nothing worth keeping */
    6463           0 :         *flags |= FRM_INVALIDATE_XMAX;
    6464           0 :         newxmax = InvalidTransactionId;
    6465             :     }
    6466           2 :     else if (TransactionIdIsValid(update_xid) && !has_lockers)
    6467             :     {
    6468             :         /*
    6469             :          * If there's a single member and it's an update, pass it back alone
    6470             :          * without creating a new Multi.  (XXX we could do this when there's a
    6471             :          * single remaining locker, too, but that would complicate the API too
    6472             :          * much; moreover, the case with the single updater is more
    6473             :          * interesting, because those are longer-lived.)
    6474             :          */
    6475             :         Assert(nnewmembers == 1);
    6476           0 :         *flags |= FRM_RETURN_IS_XID;
    6477           0 :         if (update_committed)
    6478           0 :             *flags |= FRM_MARK_COMMITTED;
    6479           0 :         newxmax = update_xid;
    6480             :     }
    6481             :     else
    6482             :     {
    6483             :         /*
    6484             :          * Create a new multixact with the surviving members of the previous
    6485             :          * one, to set as new Xmax in the tuple
    6486             :          */
    6487           2 :         newxmax = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
    6488           2 :         *flags |= FRM_RETURN_IS_MULTI;
    6489             :     }
    6490             : 
    6491           2 :     pfree(newmembers);
    6492             : 
    6493           2 :     pagefrz->freeze_required = true;
    6494           2 :     return newxmax;
    6495             : }
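
/*
 * [Editor's illustrative sketch -- not part of heapam.c or of the coverage
 * data above.]  Simplified shape of how the caller reacts to the FRM_* flags
 * returned in *flags, paraphrasing the multi-xmax handling performed by
 * heap_prepare_freeze_tuple below; it is a summary, not the exact code.
 */
newxmax = FreezeMultiXactId(xid, tuple->t_infomask, cutoffs, &flags, pagefrz);

if (flags & FRM_NOOP)
    ;                                   /* keep the existing multi as xmax */
else if (flags & FRM_INVALIDATE_XMAX)
    freeze_xmax = true;                 /* freeze plan will clear xmax */
else if (flags & FRM_RETURN_IS_XID)
    replace_xmax = true;                /* newxmax is a plain updater XID */
else if (flags & FRM_RETURN_IS_MULTI)
    replace_xmax = true;                /* newxmax is a newly created multi */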
    6496             : 
    6497             : /*
    6498             :  * heap_prepare_freeze_tuple
    6499             :  *
    6500             :  * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
    6501             :  * are older than the OldestXmin and/or OldestMxact freeze cutoffs.  If so,
    6502             :  * set up enough state (in the *frz output argument) to enable the caller to
    6503             :  * process this tuple as part of freezing its page, and return true.  Return
    6504             :  * false if nothing can be changed about the tuple right now.
    6505             :  *
    6506             :  * Also sets *totally_frozen to true if the tuple will be totally frozen once
    6507             :  * caller executes returned freeze plan (or if the tuple was already totally
    6508             :  * frozen by an earlier VACUUM).  This indicates that there are no remaining
    6509             :  * XIDs or MultiXactIds that will need to be processed by a future VACUUM.
    6510             :  *
    6511             :  * VACUUM caller must assemble HeapTupleFreeze freeze plan entries for every
    6512             :  * tuple that we returned true for, and then execute freezing.  Caller must
    6513             :  * initialize pagefrz fields for page as a whole before first call here for
    6514             :  * each heap page.
    6515             :  *
    6516             :  * VACUUM caller decides whether or not to freeze the page as a whole.
    6517             :  * We'll often prepare freeze plans for a page that caller just discards.
    6518             :  * However, VACUUM doesn't always get to make a choice; it must freeze when
    6519             :  * pagefrz.freeze_required is set, to ensure that any XIDs < FreezeLimit (and
    6520             :  * MXIDs < MultiXactCutoff) can never be left behind.  We help to make sure
    6521             :  * that VACUUM always follows that rule.
    6522             :  *
    6523             :  * We sometimes force freezing of xmax MultiXactId values long before it is
    6524             :  * strictly necessary to do so just to ensure the FreezeLimit postcondition.
    6525             :  * It's worth processing MultiXactIds proactively when it is cheap to do so,
    6526             :  * and it's convenient to make that happen by piggy-backing it on the "force
    6527             :  * freezing" mechanism.  Conversely, we sometimes delay freezing MultiXactIds
    6528             :  * because it is expensive right now (though only when it's still possible to
    6529             :  * do so without violating the FreezeLimit/MultiXactCutoff postcondition).
    6530             :  *
    6531             :  * It is assumed that the caller has checked the tuple with
    6532             :  * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
    6533             :  * (else we should be removing the tuple, not freezing it).
    6534             :  *
    6535             :  * NB: This function has side effects: it might allocate a new MultiXactId.
    6536             :  * It will be set as tuple's new xmax when our *frz output is processed within
    6537             :  * heap_execute_freeze_tuple later on.  If the tuple is in a shared buffer
    6538             :  * then caller had better have an exclusive lock on it already.
    6539             :  */
    6540             : bool
    6541     6251366 : heap_prepare_freeze_tuple(HeapTupleHeader tuple,
    6542             :                           const struct VacuumCutoffs *cutoffs,
    6543             :                           HeapPageFreeze *pagefrz,
    6544             :                           HeapTupleFreeze *frz, bool *totally_frozen)
    6545             : {
    6546     6251366 :     bool        xmin_already_frozen = false,
    6547     6251366 :                 xmax_already_frozen = false;
    6548     6251366 :     bool        freeze_xmin = false,
    6549     6251366 :                 replace_xvac = false,
    6550     6251366 :                 replace_xmax = false,
    6551     6251366 :                 freeze_xmax = false;
    6552             :     TransactionId xid;
    6553             : 
    6554     6251366 :     frz->xmax = HeapTupleHeaderGetRawXmax(tuple);
    6555     6251366 :     frz->t_infomask2 = tuple->t_infomask2;
    6556     6251366 :     frz->t_infomask = tuple->t_infomask;
    6557     6251366 :     frz->frzflags = 0;
    6558     6251366 :     frz->checkflags = 0;
    6559             : 
    6560             :     /*
    6561             :      * Process xmin, while keeping track of whether it's already frozen, or
    6562             :      * will become frozen iff our freeze plan is executed by caller (could be
    6563             :      * neither).
    6564             :      */
    6565     6251366 :     xid = HeapTupleHeaderGetXmin(tuple);
    6566     6251366 :     if (!TransactionIdIsNormal(xid))
    6567     1857602 :         xmin_already_frozen = true;
    6568             :     else
    6569             :     {
    6570     4393764 :         if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
    6571           0 :             ereport(ERROR,
    6572             :                     (errcode(ERRCODE_DATA_CORRUPTED),
    6573             :                      errmsg_internal("found xmin %u from before relfrozenxid %u",
    6574             :                                      xid, cutoffs->relfrozenxid)));
    6575             : 
    6576             :         /* Will set freeze_xmin flags in freeze plan below */
    6577     4393764 :         freeze_xmin = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
    6578             : 
    6579             :         /* Verify that xmin committed if and when freeze plan is executed */
    6580     4393764 :         if (freeze_xmin)
    6581     3201388 :             frz->checkflags |= HEAP_FREEZE_CHECK_XMIN_COMMITTED;
    6582             :     }
    6583             : 
    6584             :     /*
    6585             :      * Old-style VACUUM FULL is gone, but we have to process xvac for as long
    6586             :      * as we support having MOVED_OFF/MOVED_IN tuples in the database
    6587             :      */
    6588     6251366 :     xid = HeapTupleHeaderGetXvac(tuple);
    6589     6251366 :     if (TransactionIdIsNormal(xid))
    6590             :     {
    6591             :         Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
    6592             :         Assert(TransactionIdPrecedes(xid, cutoffs->OldestXmin));
    6593             : 
    6594             :         /*
    6595             :          * For Xvac, we always freeze proactively.  This allows totally_frozen
    6596             :          * tracking to ignore xvac.
    6597             :          */
    6598           0 :         replace_xvac = pagefrz->freeze_required = true;
    6599             : 
    6600             :         /* Will set replace_xvac flags in freeze plan below */
    6601             :     }
    6602             : 
    6603             :     /* Now process xmax */
    6604     6251366 :     xid = frz->xmax;
    6605     6251366 :     if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
    6606             :     {
    6607             :         /* Raw xmax is a MultiXactId */
    6608             :         TransactionId newxmax;
    6609             :         uint16      flags;
    6610             : 
    6611             :         /*
    6612             :          * We will either remove xmax completely (in the "freeze_xmax" path),
    6613             :          * process xmax by replacing it (in the "replace_xmax" path), or
    6614             :          * perform no-op xmax processing.  The only constraint is that the
    6615             :          * FreezeLimit/MultiXactCutoff postcondition must never be violated.
    6616             :          */
    6617          16 :         newxmax = FreezeMultiXactId(xid, tuple->t_infomask, cutoffs,
    6618             :                                     &flags, pagefrz);
    6619             : 
    6620          16 :         if (flags & FRM_NOOP)
    6621             :         {
    6622             :             /*
    6623             :              * xmax is a MultiXactId, and nothing about it changes for now.
    6624             :              * This is the only case where 'freeze_required' won't have been
    6625             :              * set for us by FreezeMultiXactId, as well as the only case where
    6626             :              * neither freeze_xmax nor replace_xmax are set (given a multi).
    6627             :              *
    6628             :              * This is a no-op, but the call to FreezeMultiXactId might have
    6629             :              * ratcheted back NewRelfrozenXid and/or NewRelminMxid trackers
    6630             :              * for us (the "freeze page" variants, specifically).  That'll
    6631             :              * make it safe for our caller to freeze the page later on, while
    6632             :              * leaving this particular xmax undisturbed.
    6633             :              *
    6634             :              * FreezeMultiXactId is _not_ responsible for the "no freeze"
    6635             :              * NewRelfrozenXid/NewRelminMxid trackers, though -- that's our
    6636             :              * job.  A call to heap_tuple_should_freeze for this same tuple
    6637             :              * will take place below if 'freeze_required' isn't set already.
    6638             :              * (This repeats work from FreezeMultiXactId, but allows "no
    6639             :              * freeze" tracker maintenance to happen in only one place.)
    6640             :              */
    6641             :             Assert(!MultiXactIdPrecedes(newxmax, cutoffs->MultiXactCutoff));
    6642             :             Assert(MultiXactIdIsValid(newxmax) && xid == newxmax);
    6643             :         }
    6644          14 :         else if (flags & FRM_RETURN_IS_XID)
    6645             :         {
    6646             :             /*
    6647             :              * xmax will become an updater Xid (original MultiXact's updater
    6648             :              * member Xid will be carried forward as a simple Xid in Xmax).
    6649             :              */
    6650             :             Assert(!TransactionIdPrecedes(newxmax, cutoffs->OldestXmin));
    6651             : 
    6652             :             /*
    6653             :              * NB -- some of these transformations are only valid because we
    6654             :              * know the return Xid is a tuple updater (i.e. not merely a
    6655             :              * locker.) Also note that the only reason we don't explicitly
    6656             :              * worry about HEAP_KEYS_UPDATED is because it lives in
    6657             :              * t_infomask2 rather than t_infomask.
    6658             :              */
    6659           0 :             frz->t_infomask &= ~HEAP_XMAX_BITS;
    6660           0 :             frz->xmax = newxmax;
    6661           0 :             if (flags & FRM_MARK_COMMITTED)
    6662           0 :                 frz->t_infomask |= HEAP_XMAX_COMMITTED;
    6663           0 :             replace_xmax = true;
    6664             :         }
    6665          14 :         else if (flags & FRM_RETURN_IS_MULTI)
    6666             :         {
    6667             :             uint16      newbits;
    6668             :             uint16      newbits2;
    6669             : 
    6670             :             /*
    6671             :              * xmax is an old MultiXactId that we have to replace with a new
    6672             :              * MultiXactId, to carry forward two or more original member XIDs.
    6673             :              */
    6674             :             Assert(!MultiXactIdPrecedes(newxmax, cutoffs->OldestMxact));
    6675             : 
    6676             :             /*
    6677             :              * We can't use GetMultiXactIdHintBits directly on the new multi
    6678             :              * here; that routine initializes the masks to all zeroes, which
    6679             :              * would lose other bits we need.  Doing it this way ensures all
    6680             :              * unrelated bits remain untouched.
    6681             :              */
    6682           2 :             frz->t_infomask &= ~HEAP_XMAX_BITS;
    6683           2 :             frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    6684           2 :             GetMultiXactIdHintBits(newxmax, &newbits, &newbits2);
    6685           2 :             frz->t_infomask |= newbits;
    6686           2 :             frz->t_infomask2 |= newbits2;
    6687           2 :             frz->xmax = newxmax;
    6688           2 :             replace_xmax = true;
    6689             :         }
    6690             :         else
    6691             :         {
    6692             :             /*
    6693             :              * Freeze plan for tuple "freezes xmax" in the strictest sense:
    6694             :              * it'll leave nothing in xmax (neither an Xid nor a MultiXactId).
    6695             :              */
    6696             :             Assert(flags & FRM_INVALIDATE_XMAX);
    6697             :             Assert(!TransactionIdIsValid(newxmax));
    6698             : 
    6699             :             /* Will set freeze_xmax flags in freeze plan below */
    6700          12 :             freeze_xmax = true;
    6701             :         }
    6702             : 
    6703             :         /* MultiXactId processing forces freezing (barring FRM_NOOP case) */
    6704             :         Assert(pagefrz->freeze_required || (!freeze_xmax && !replace_xmax));
    6705             :     }
    6706     6251350 :     else if (TransactionIdIsNormal(xid))
    6707             :     {
    6708             :         /* Raw xmax is normal XID */
    6709      532794 :         if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
    6710           0 :             ereport(ERROR,
    6711             :                     (errcode(ERRCODE_DATA_CORRUPTED),
    6712             :                      errmsg_internal("found xmax %u from before relfrozenxid %u",
    6713             :                                      xid, cutoffs->relfrozenxid)));
    6714             : 
    6715             :         /* Will set freeze_xmax flags in freeze plan below */
    6716      532794 :         freeze_xmax = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
    6717             : 
    6718             :         /*
    6719             :          * Verify that xmax aborted if and when freeze plan is executed,
    6720             :          * provided it's from an update. (A lock-only xmax can be removed
    6721             :          * independent of this, since the lock is released at xact end.)
    6722             :          */
    6723      532794 :         if (freeze_xmax && !HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
    6724        1338 :             frz->checkflags |= HEAP_FREEZE_CHECK_XMAX_ABORTED;
    6725             :     }
    6726     5718556 :     else if (!TransactionIdIsValid(xid))
    6727             :     {
    6728             :         /* Raw xmax is InvalidTransactionId XID */
    6729             :         Assert((tuple->t_infomask & HEAP_XMAX_IS_MULTI) == 0);
    6730     5718556 :         xmax_already_frozen = true;
    6731             :     }
    6732             :     else
    6733           0 :         ereport(ERROR,
    6734             :                 (errcode(ERRCODE_DATA_CORRUPTED),
    6735             :                  errmsg_internal("found raw xmax %u (infomask 0x%04x) not invalid and not multi",
    6736             :                                  xid, tuple->t_infomask)));
    6737             : 
    6738     6251366 :     if (freeze_xmin)
    6739             :     {
    6740             :         Assert(!xmin_already_frozen);
    6741             : 
    6742     3201388 :         frz->t_infomask |= HEAP_XMIN_FROZEN;
    6743             :     }
    6744     6251366 :     if (replace_xvac)
    6745             :     {
    6746             :         /*
    6747             :          * If a MOVED_OFF tuple is not dead, the xvac transaction must have
    6748             :          * failed; whereas a non-dead MOVED_IN tuple must mean the xvac
    6749             :          * transaction succeeded.
    6750             :          */
    6751             :         Assert(pagefrz->freeze_required);
    6752           0 :         if (tuple->t_infomask & HEAP_MOVED_OFF)
    6753           0 :             frz->frzflags |= XLH_INVALID_XVAC;
    6754             :         else
    6755           0 :             frz->frzflags |= XLH_FREEZE_XVAC;
    6756             :     }
    6757             :     if (replace_xmax)
    6758             :     {
    6759             :         Assert(!xmax_already_frozen && !freeze_xmax);
    6760             :         Assert(pagefrz->freeze_required);
    6761             : 
    6762             :         /* Already set replace_xmax flags in freeze plan earlier */
    6763             :     }
    6764     6251366 :     if (freeze_xmax)
    6765             :     {
    6766             :         Assert(!xmax_already_frozen && !replace_xmax);
    6767             : 
    6768        2866 :         frz->xmax = InvalidTransactionId;
    6769             : 
    6770             :         /*
    6771             :          * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
    6772             :          * LOCKED.  Normalize to INVALID just to be sure no one gets confused.
    6773             :          * Also get rid of the HEAP_KEYS_UPDATED bit.
    6774             :          */
    6775        2866 :         frz->t_infomask &= ~HEAP_XMAX_BITS;
    6776        2866 :         frz->t_infomask |= HEAP_XMAX_INVALID;
    6777        2866 :         frz->t_infomask2 &= ~HEAP_HOT_UPDATED;
    6778        2866 :         frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    6779             :     }
    6780             : 
    6781             :     /*
    6782             :      * Determine if this tuple is already totally frozen, or will become
    6783             :      * totally frozen (provided caller executes freeze plans for the page)
    6784             :      */
    6785    11307490 :     *totally_frozen = ((freeze_xmin || xmin_already_frozen) &&
    6786     5056124 :                        (freeze_xmax || xmax_already_frozen));
    6787             : 
    6788     6251366 :     if (!pagefrz->freeze_required && !(xmin_already_frozen &&
    6789             :                                        xmax_already_frozen))
    6790             :     {
    6791             :         /*
    6792             :          * So far no previous tuple from the page made freezing mandatory.
    6793             :          * Does this tuple force caller to freeze the entire page?
    6794             :          */
    6795     2845334 :         pagefrz->freeze_required =
    6796     2845334 :             heap_tuple_should_freeze(tuple, cutoffs,
    6797             :                                      &pagefrz->NoFreezePageRelfrozenXid,
    6798             :                                      &pagefrz->NoFreezePageRelminMxid);
    6799             :     }
    6800             : 
    6801             :     /* Tell caller if this tuple has a usable freeze plan set in *frz */
    6802     6251366 :     return freeze_xmin || replace_xvac || replace_xmax || freeze_xmax;
    6803             : }
    6804             : 
    6805             : /*
    6806             :  * heap_execute_freeze_tuple
    6807             :  *      Execute the prepared freezing of a tuple with caller's freeze plan.
    6808             :  *
    6809             :  * Caller is responsible for ensuring that no other backend can access the
    6810             :  * storage underlying this tuple, either by holding an exclusive lock on the
    6811             :  * buffer containing it (which is what lazy VACUUM does), or by having it be
    6812             :  * in private storage (which is what CLUSTER and friends do).
    6813             :  */
    6814             : static inline void
    6815     1455490 : heap_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz)
    6816             : {
    6817     1455490 :     HeapTupleHeaderSetXmax(tuple, frz->xmax);
    6818             : 
    6819     1455490 :     if (frz->frzflags & XLH_FREEZE_XVAC)
    6820           0 :         HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
    6821             : 
    6822     1455490 :     if (frz->frzflags & XLH_INVALID_XVAC)
    6823           0 :         HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
    6824             : 
    6825     1455490 :     tuple->t_infomask = frz->t_infomask;
    6826     1455490 :     tuple->t_infomask2 = frz->t_infomask2;
    6827     1455490 : }
    6828             : 
    6829             : /*
    6830             :  * Perform xmin/xmax XID status sanity checks before actually executing freeze
    6831             :  * plans.
    6832             :  *
    6833             :  * heap_prepare_freeze_tuple doesn't perform these checks directly because
    6834             :  * pg_xact lookups are relatively expensive.  They shouldn't be repeated by
    6835             :  * successive VACUUMs that each decide against freezing the same page.
    6836             :  */
    6837             : void
    6838       22352 : heap_pre_freeze_checks(Buffer buffer,
    6839             :                        HeapTupleFreeze *tuples, int ntuples)
    6840             : {
    6841       22352 :     Page        page = BufferGetPage(buffer);
    6842             : 
    6843      978084 :     for (int i = 0; i < ntuples; i++)
    6844             :     {
    6845      955732 :         HeapTupleFreeze *frz = tuples + i;
    6846      955732 :         ItemId      itemid = PageGetItemId(page, frz->offset);
    6847             :         HeapTupleHeader htup;
    6848             : 
    6849      955732 :         htup = (HeapTupleHeader) PageGetItem(page, itemid);
    6850             : 
    6851             :         /* Deliberately avoid relying on tuple hint bits here */
    6852      955732 :         if (frz->checkflags & HEAP_FREEZE_CHECK_XMIN_COMMITTED)
    6853             :         {
    6854      955730 :             TransactionId xmin = HeapTupleHeaderGetRawXmin(htup);
    6855             : 
    6856             :             Assert(!HeapTupleHeaderXminFrozen(htup));
    6857      955730 :             if (unlikely(!TransactionIdDidCommit(xmin)))
    6858           0 :                 ereport(ERROR,
    6859             :                         (errcode(ERRCODE_DATA_CORRUPTED),
    6860             :                          errmsg_internal("uncommitted xmin %u needs to be frozen",
    6861             :                                          xmin)));
    6862             :         }
    6863             : 
    6864             :         /*
    6865             :          * TransactionIdDidAbort won't work reliably in the presence of XIDs
    6866             :          * left behind by transactions that were in progress during a crash,
    6867             :          * so we can only check that xmax didn't commit
    6868             :          */
    6869      955732 :         if (frz->checkflags & HEAP_FREEZE_CHECK_XMAX_ABORTED)
    6870             :         {
    6871         240 :             TransactionId xmax = HeapTupleHeaderGetRawXmax(htup);
    6872             : 
    6873             :             Assert(TransactionIdIsNormal(xmax));
    6874         240 :             if (unlikely(TransactionIdDidCommit(xmax)))
    6875           0 :                 ereport(ERROR,
    6876             :                         (errcode(ERRCODE_DATA_CORRUPTED),
    6877             :                          errmsg_internal("cannot freeze committed xmax %u",
    6878             :                                          xmax)));
    6879             :         }
    6880             :     }
    6881       22352 : }
    6882             : 
    6883             : /*
    6884             :  * Helper which executes freezing of one or more heap tuples on a page on
    6885             :  * behalf of caller.  Caller passes an array of tuple plans from
    6886             :  * heap_prepare_freeze_tuple.  Caller must set 'offset' in each plan for us.
    6887             :  * Must be called in a critical section that also marks the buffer dirty and,
    6888             :  * if needed, emits WAL.
    6889             :  */
    6890             : void
    6891       22352 : heap_freeze_prepared_tuples(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
    6892             : {
    6893       22352 :     Page        page = BufferGetPage(buffer);
    6894             : 
    6895      978084 :     for (int i = 0; i < ntuples; i++)
    6896             :     {
    6897      955732 :         HeapTupleFreeze *frz = tuples + i;
    6898      955732 :         ItemId      itemid = PageGetItemId(page, frz->offset);
    6899             :         HeapTupleHeader htup;
    6900             : 
    6901      955732 :         htup = (HeapTupleHeader) PageGetItem(page, itemid);
    6902      955732 :         heap_execute_freeze_tuple(htup, frz);
    6903             :     }
    6904       22352 : }
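
The three routines above are designed to be used together: freeze plans are prepared per tuple, sanity-checked, and then executed inside a critical section.  The following caller sketch shows one plausible way to drive them for a single heap page; it assumes pruning has already removed HEAPTUPLE_DEAD tuples, elides WAL emission, and the function name freeze_page_sketch and its locals are hypothetical, not part of heapam.c.

static void
freeze_page_sketch(Buffer buf, const struct VacuumCutoffs *cutoffs)
{
    Page        page = BufferGetPage(buf);
    OffsetNumber offnum,
                maxoff = PageGetMaxOffsetNumber(page);
    HeapPageFreeze pagefrz;
    HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
    int         nfrozen = 0;

    /* Page-level trackers start from the caller's cutoffs */
    pagefrz.freeze_required = false;
    pagefrz.FreezePageRelfrozenXid = cutoffs->FreezeLimit;
    pagefrz.FreezePageRelminMxid = cutoffs->MultiXactCutoff;
    pagefrz.NoFreezePageRelfrozenXid = cutoffs->FreezeLimit;
    pagefrz.NoFreezePageRelminMxid = cutoffs->MultiXactCutoff;

    /* Prepare a freeze plan for every normal tuple that needs one */
    for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum++)
    {
        ItemId      itemid = PageGetItemId(page, offnum);
        HeapTupleHeader htup;
        bool        totally_frozen;

        if (!ItemIdIsNormal(itemid))
            continue;

        htup = (HeapTupleHeader) PageGetItem(page, itemid);
        if (heap_prepare_freeze_tuple(htup, cutoffs, &pagefrz,
                                      &frozen[nfrozen], &totally_frozen))
            frozen[nfrozen++].offset = offnum;
    }

    if (nfrozen == 0)
        return;

    /* Verify XID status before entering the critical section */
    heap_pre_freeze_checks(buf, frozen, nfrozen);

    START_CRIT_SECTION();
    heap_freeze_prepared_tuples(buf, frozen, nfrozen);
    MarkBufferDirty(buf);
    /* a WAL-logged caller would emit its freeze/prune record here */
    END_CRIT_SECTION();
}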
    6905             : 
    6906             : /*
    6907             :  * heap_freeze_tuple
    6908             :  *      Freeze tuple in place, without WAL logging.
    6909             :  *
    6910             :  * Useful for callers like CLUSTER that perform their own WAL logging.
    6911             :  */
    6912             : bool
    6913      728328 : heap_freeze_tuple(HeapTupleHeader tuple,
    6914             :                   TransactionId relfrozenxid, TransactionId relminmxid,
    6915             :                   TransactionId FreezeLimit, TransactionId MultiXactCutoff)
    6916             : {
    6917             :     HeapTupleFreeze frz;
    6918             :     bool        do_freeze;
    6919             :     bool        totally_frozen;
    6920             :     struct VacuumCutoffs cutoffs;
    6921             :     HeapPageFreeze pagefrz;
    6922             : 
    6923      728328 :     cutoffs.relfrozenxid = relfrozenxid;
    6924      728328 :     cutoffs.relminmxid = relminmxid;
    6925      728328 :     cutoffs.OldestXmin = FreezeLimit;
    6926      728328 :     cutoffs.OldestMxact = MultiXactCutoff;
    6927      728328 :     cutoffs.FreezeLimit = FreezeLimit;
    6928      728328 :     cutoffs.MultiXactCutoff = MultiXactCutoff;
    6929             : 
    6930      728328 :     pagefrz.freeze_required = true;
    6931      728328 :     pagefrz.FreezePageRelfrozenXid = FreezeLimit;
    6932      728328 :     pagefrz.FreezePageRelminMxid = MultiXactCutoff;
    6933      728328 :     pagefrz.NoFreezePageRelfrozenXid = FreezeLimit;
    6934      728328 :     pagefrz.NoFreezePageRelminMxid = MultiXactCutoff;
    6935             : 
    6936      728328 :     do_freeze = heap_prepare_freeze_tuple(tuple, &cutoffs,
    6937             :                                           &pagefrz, &frz, &totally_frozen);
    6938             : 
    6939             :     /*
    6940             :      * Note that because this is not a WAL-logged operation, we don't need to
    6941             :      * fill in the offset in the freeze record.
    6942             :      */
    6943             : 
    6944      728328 :     if (do_freeze)
    6945      496104 :         heap_execute_freeze_tuple(tuple, &frz);
    6946      728328 :     return do_freeze;
    6947             : }
    6948             : 
    6949             : /*
    6950             :  * For a given MultiXactId, return the hint bits that should be set in the
    6951             :  * tuple's infomask.
    6952             :  *
    6953             :  * Normally this should be called for a multixact that was just created, and
    6954             :  * is therefore still in our local cache, making the GetMembers call fast.
    6955             :  */
    6956             : static void
    6957        2370 : GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
    6958             :                        uint16 *new_infomask2)
    6959             : {
    6960             :     int         nmembers;
    6961             :     MultiXactMember *members;
    6962             :     int         i;
    6963        2370 :     uint16      bits = HEAP_XMAX_IS_MULTI;
    6964        2370 :     uint16      bits2 = 0;
    6965        2370 :     bool        has_update = false;
    6966        2370 :     LockTupleMode strongest = LockTupleKeyShare;
    6967             : 
    6968             :     /*
    6969             :      * We only use this in multis we just created, so they cannot be values
    6970             :      * pre-pg_upgrade.
    6971             :      */
    6972        2370 :     nmembers = GetMultiXactIdMembers(multi, &members, false, false);
    6973             : 
    6974        7258 :     for (i = 0; i < nmembers; i++)
    6975             :     {
    6976             :         LockTupleMode mode;
    6977             : 
    6978             :         /*
    6979             :          * Remember the strongest lock mode held by any member of the
    6980             :          * multixact.
    6981             :          */
    6982        4888 :         mode = TUPLOCK_from_mxstatus(members[i].status);
    6983        4888 :         if (mode > strongest)
    6984        1312 :             strongest = mode;
    6985             : 
    6986             :         /* See what other bits we need */
    6987        4888 :         switch (members[i].status)
    6988             :         {
    6989        4506 :             case MultiXactStatusForKeyShare:
    6990             :             case MultiXactStatusForShare:
    6991             :             case MultiXactStatusForNoKeyUpdate:
    6992        4506 :                 break;
    6993             : 
    6994         104 :             case MultiXactStatusForUpdate:
    6995         104 :                 bits2 |= HEAP_KEYS_UPDATED;
    6996         104 :                 break;
    6997             : 
    6998         258 :             case MultiXactStatusNoKeyUpdate:
    6999         258 :                 has_update = true;
    7000         258 :                 break;
    7001             : 
    7002          20 :             case MultiXactStatusUpdate:
    7003          20 :                 bits2 |= HEAP_KEYS_UPDATED;
    7004          20 :                 has_update = true;
    7005          20 :                 break;
    7006             :         }
    7007        4888 :     }
    7008             : 
    7009        2370 :     if (strongest == LockTupleExclusive ||
    7010             :         strongest == LockTupleNoKeyExclusive)
    7011         432 :         bits |= HEAP_XMAX_EXCL_LOCK;
    7012        1938 :     else if (strongest == LockTupleShare)
    7013         874 :         bits |= HEAP_XMAX_SHR_LOCK;
    7014        1064 :     else if (strongest == LockTupleKeyShare)
    7015        1064 :         bits |= HEAP_XMAX_KEYSHR_LOCK;
    7016             : 
    7017        2370 :     if (!has_update)
    7018        2092 :         bits |= HEAP_XMAX_LOCK_ONLY;
    7019             : 
    7020        2370 :     if (nmembers > 0)
    7021        2370 :         pfree(members);
    7022             : 
    7023        2370 :     *new_infomask = bits;
    7024        2370 :     *new_infomask2 = bits2;
    7025        2370 : }
    7026             : 
    7027             : /*
    7028             :  * MultiXactIdGetUpdateXid
    7029             :  *
    7030             :  * Given a multixact Xmax and corresponding infomask, which does not have the
    7031             :  * HEAP_XMAX_LOCK_ONLY bit set, obtain and return the Xid of the updating
    7032             :  * transaction.
    7033             :  *
    7034             :  * Caller is expected to check the status of the updating transaction, if
    7035             :  * necessary.
    7036             :  */
    7037             : static TransactionId
    7038        1026 : MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
    7039             : {
    7040        1026 :     TransactionId update_xact = InvalidTransactionId;
    7041             :     MultiXactMember *members;
    7042             :     int         nmembers;
    7043             : 
    7044             :     Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
    7045             :     Assert(t_infomask & HEAP_XMAX_IS_MULTI);
    7046             : 
    7047             :     /*
    7048             :      * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
    7049             :      * pre-pg_upgrade.
    7050             :      */
    7051        1026 :     nmembers = GetMultiXactIdMembers(xmax, &members, false, false);
    7052             : 
    7053        1026 :     if (nmembers > 0)
    7054             :     {
    7055             :         int         i;
    7056             : 
    7057        2686 :         for (i = 0; i < nmembers; i++)
    7058             :         {
    7059             :             /* Ignore lockers */
    7060        2686 :             if (!ISUPDATE_from_mxstatus(members[i].status))
    7061        1660 :                 continue;
    7062             : 
    7063             :             /* there can be at most one updater */
    7064             :             Assert(update_xact == InvalidTransactionId);
    7065        1026 :             update_xact = members[i].xid;
    7066             : #ifndef USE_ASSERT_CHECKING
    7067             : 
    7068             :             /*
    7069             :              * in an assert-enabled build, walk the whole array to ensure
    7070             :              * there's no other updater.
    7071             :              */
    7072        1026 :             break;
    7073             : #endif
    7074             :         }
    7075             : 
    7076        1026 :         pfree(members);
    7077             :     }
    7078             : 
    7079        1026 :     return update_xact;
    7080             : }
    7081             : 
    7082             : /*
    7083             :  * HeapTupleGetUpdateXid
    7084             :  *      As above, but use a HeapTupleHeader
    7085             :  *
    7086             :  * See also HeapTupleHeaderGetUpdateXid, which can be used without previously
    7087             :  * checking the hint bits.
    7088             :  */
    7089             : TransactionId
    7090        1010 : HeapTupleGetUpdateXid(HeapTupleHeader tuple)
    7091             : {
    7092        2020 :     return MultiXactIdGetUpdateXid(HeapTupleHeaderGetRawXmax(tuple),
    7093        1010 :                                    tuple->t_infomask);
    7094             : }
    7095             : 
    7096             : /*
    7097             :  * Does the given multixact conflict with the current transaction grabbing a
    7098             :  * tuple lock of the given strength?
    7099             :  *
    7100             :  * The passed infomask pairs up with the given multixact in the tuple header.
    7101             :  *
    7102             :  * If current_is_member is not NULL, it is set to 'true' if the current
    7103             :  * transaction is a member of the given multixact.
    7104             :  */
    7105             : static bool
    7106         188 : DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
    7107             :                         LockTupleMode lockmode, bool *current_is_member)
    7108             : {
    7109             :     int         nmembers;
    7110             :     MultiXactMember *members;
    7111         188 :     bool        result = false;
    7112         188 :     LOCKMODE    wanted = tupleLockExtraInfo[lockmode].hwlock;
    7113             : 
    7114         188 :     if (HEAP_LOCKED_UPGRADED(infomask))
    7115           0 :         return false;
    7116             : 
    7117         188 :     nmembers = GetMultiXactIdMembers(multi, &members, false,
    7118         188 :                                      HEAP_XMAX_IS_LOCKED_ONLY(infomask));
    7119         188 :     if (nmembers >= 0)
    7120             :     {
    7121             :         int         i;
    7122             : 
    7123         590 :         for (i = 0; i < nmembers; i++)
    7124             :         {
    7125             :             TransactionId memxid;
    7126             :             LOCKMODE    memlockmode;
    7127             : 
    7128         414 :             if (result && (current_is_member == NULL || *current_is_member))
    7129             :                 break;
    7130             : 
    7131         402 :             memlockmode = LOCKMODE_from_mxstatus(members[i].status);
    7132             : 
    7133             :             /* ignore members from current xact (but track their presence) */
    7134         402 :             memxid = members[i].xid;
    7135         402 :             if (TransactionIdIsCurrentTransactionId(memxid))
    7136             :             {
    7137         182 :                 if (current_is_member != NULL)
    7138         156 :                     *current_is_member = true;
    7139         182 :                 continue;
    7140             :             }
    7141         220 :             else if (result)
    7142          16 :                 continue;
    7143             : 
    7144             :             /* ignore members that don't conflict with the lock we want */
    7145         204 :             if (!DoLockModesConflict(memlockmode, wanted))
    7146         134 :                 continue;
    7147             : 
    7148          70 :             if (ISUPDATE_from_mxstatus(members[i].status))
    7149             :             {
    7150             :                 /* ignore aborted updaters */
    7151          34 :                 if (TransactionIdDidAbort(memxid))
    7152           2 :                     continue;
    7153             :             }
    7154             :             else
    7155             :             {
    7156             :                 /* ignore lockers-only that are no longer in progress */
    7157          36 :                 if (!TransactionIdIsInProgress(memxid))
    7158          10 :                     continue;
    7159             :             }
    7160             : 
    7161             :             /*
    7162             :              * Whatever remains are either live lockers that conflict with our
    7163             :              * wanted lock, or updaters that are not aborted.  Those conflict
    7164             :              * with what we want.  Set up to return true, but keep going to
    7165             :              * look for the current transaction among the multixact members,
    7166             :              * if needed.
    7167             :              */
    7168          58 :             result = true;
    7169             :         }
    7170         188 :         pfree(members);
    7171             :     }
    7172             : 
    7173         188 :     return result;
    7174             : }
    7175             : 
    7176             : /*
    7177             :  * Do_MultiXactIdWait
    7178             :  *      Actual implementation for the two functions below.
    7179             :  *
    7180             :  * 'multi', 'status' and 'infomask' indicate what to sleep on (the status is
    7181             :  * needed to ensure we only sleep on conflicting members, and the infomask is
    7182             :  * used to optimize multixact access in case it's a lock-only multi); 'nowait'
    7183             :  * indicates whether to use conditional lock acquisition, to allow callers to
    7184             :  * fail if lock is unavailable.  'rel', 'ctid' and 'oper' are used to set up
    7185             :  * context information for error messages.  'remaining', if not NULL, receives
    7186             :  * the number of members that are still running, including any (non-aborted)
    7187             :  * subtransactions of our own transaction.
    7188             :  *
    7189             :  * We do this by sleeping on each member using XactLockTableWait.  Any
    7190             :  * members that belong to the current backend are *not* waited for, however;
    7191             :  * this would not merely be useless but would lead to Assert failure inside
    7192             :  * XactLockTableWait.  By the time this returns, it is certain that all
    7193             :  * transactions *of other backends* that were members of the MultiXactId
    7194             :  * that conflict with the requested status are dead (and no new ones can have
    7195             :  * been added, since it is not legal to add members to an existing
    7196             :  * MultiXactId).
    7197             :  *
    7198             :  * But by the time we finish sleeping, someone else may have changed the Xmax
    7199             :  * of the containing tuple, so the caller needs to iterate on us somehow.
    7200             :  *
    7201             :  * Note that in case we return false, the number of remaining members is
    7202             :  * not to be trusted.
    7203             :  */
    7204             : static bool
    7205         112 : Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
    7206             :                    uint16 infomask, bool nowait,
    7207             :                    Relation rel, ItemPointer ctid, XLTW_Oper oper,
    7208             :                    int *remaining)
    7209             : {
    7210         112 :     bool        result = true;
    7211             :     MultiXactMember *members;
    7212             :     int         nmembers;
    7213         112 :     int         remain = 0;
    7214             : 
    7215             :     /* for pre-pg_upgrade tuples, no need to sleep at all */
    7216         112 :     nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
    7217         112 :         GetMultiXactIdMembers(multi, &members, false,
    7218         112 :                               HEAP_XMAX_IS_LOCKED_ONLY(infomask));
    7219             : 
    7220         112 :     if (nmembers >= 0)
    7221             :     {
    7222             :         int         i;
    7223             : 
    7224         362 :         for (i = 0; i < nmembers; i++)
    7225             :         {
    7226         258 :             TransactionId memxid = members[i].xid;
    7227         258 :             MultiXactStatus memstatus = members[i].status;
    7228             : 
    7229         258 :             if (TransactionIdIsCurrentTransactionId(memxid))
    7230             :             {
    7231          48 :                 remain++;
    7232          48 :                 continue;
    7233             :             }
    7234             : 
    7235         210 :             if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
    7236         210 :                                      LOCKMODE_from_mxstatus(status)))
    7237             :             {
    7238          40 :                 if (remaining && TransactionIdIsInProgress(memxid))
    7239          12 :                     remain++;
    7240          40 :                 continue;
    7241             :             }
    7242             : 
    7243             :             /*
    7244             :              * This member conflicts with our multi, so we have to sleep (or
    7245             :              * return failure, if asked to avoid waiting).
    7246             :              *
    7247             :              * Note that we don't set up an error context callback ourselves,
    7248             :              * but instead we pass the info down to XactLockTableWait.  This
    7249             :              * might seem a bit wasteful because the context is set up and
    7250             :              * torn down for each member of the multixact, but in reality it
    7251             :              * should be barely noticeable, and it avoids duplicate code.
    7252             :              */
    7253         170 :             if (nowait)
    7254             :             {
    7255           8 :                 result = ConditionalXactLockTableWait(memxid);
    7256           8 :                 if (!result)
    7257           8 :                     break;
    7258             :             }
    7259             :             else
    7260         162 :                 XactLockTableWait(memxid, rel, ctid, oper);
    7261             :         }
    7262             : 
    7263         112 :         pfree(members);
    7264             :     }
    7265             : 
    7266         112 :     if (remaining)
    7267          16 :         *remaining = remain;
    7268             : 
    7269         112 :     return result;
    7270             : }
    7271             : 
    7272             : /*
    7273             :  * MultiXactIdWait
    7274             :  *      Sleep on a MultiXactId.
    7275             :  *
    7276             :  * By the time we finish sleeping, someone else may have changed the Xmax
    7277             :  * of the containing tuple, so the caller needs to iterate on us somehow.
    7278             :  *
    7279             :  * We return (in *remaining, if not NULL) the number of members that are still
    7280             :  * running, including any (non-aborted) subtransactions of our own transaction.
    7281             :  */
    7282             : static void
    7283         104 : MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
    7284             :                 Relation rel, ItemPointer ctid, XLTW_Oper oper,
    7285             :                 int *remaining)
    7286             : {
    7287         104 :     (void) Do_MultiXactIdWait(multi, status, infomask, false,
    7288             :                               rel, ctid, oper, remaining);
    7289         104 : }
    7290             : 
    7291             : /*
    7292             :  * ConditionalMultiXactIdWait
    7293             :  *      As above, but only lock if we can get the lock without blocking.
    7294             :  *
    7295             :  * By the time we finish sleeping, someone else may have changed the Xmax
    7296             :  * of the containing tuple, so the caller needs to iterate on us somehow.
    7297             :  *
    7298             :  * Returns true if the multixact is now all gone.  Returns false if some
    7299             :  * transactions might still be running.
    7300             :  *
    7301             :  * We return (in *remaining, if not NULL) the number of members that are still
    7302             :  * running, including any (non-aborted) subtransactions of our own transaction.
    7303             :  */
    7304             : static bool
    7305           8 : ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
    7306             :                            uint16 infomask, Relation rel, int *remaining)
    7307             : {
    7308           8 :     return Do_MultiXactIdWait(multi, status, infomask, true,
    7309             :                               rel, NULL, XLTW_None, remaining);
    7310             : }
    7311             : 
    7312             : /*
    7313             :  * heap_tuple_needs_eventual_freeze
    7314             :  *
    7315             :  * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
    7316             :  * will eventually require freezing (if tuple isn't removed by pruning first).
    7317             :  */
    7318             : bool
    7319      171294 : heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
    7320             : {
    7321             :     TransactionId xid;
    7322             : 
    7323             :     /*
    7324             :      * If xmin is a normal transaction ID, this tuple is definitely not
    7325             :      * frozen.
    7326             :      */
    7327      171294 :     xid = HeapTupleHeaderGetXmin(tuple);
    7328      171294 :     if (TransactionIdIsNormal(xid))
    7329        4792 :         return true;
    7330             : 
    7331             :     /*
    7332             :      * If xmax is a valid xact or multixact, this tuple is also not frozen.
    7333             :      */
    7334      166502 :     if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
    7335             :     {
    7336             :         MultiXactId multi;
    7337             : 
    7338           0 :         multi = HeapTupleHeaderGetRawXmax(tuple);
    7339           0 :         if (MultiXactIdIsValid(multi))
    7340           0 :             return true;
    7341             :     }
    7342             :     else
    7343             :     {
    7344      166502 :         xid = HeapTupleHeaderGetRawXmax(tuple);
    7345      166502 :         if (TransactionIdIsNormal(xid))
    7346           4 :             return true;
    7347             :     }
    7348             : 
    7349      166498 :     if (tuple->t_infomask & HEAP_MOVED)
    7350             :     {
    7351           0 :         xid = HeapTupleHeaderGetXvac(tuple);
    7352           0 :         if (TransactionIdIsNormal(xid))
    7353           0 :             return true;
    7354             :     }
    7355             : 
    7356      166498 :     return false;
    7357             : }
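
As a usage illustration, a caller that wants to know whether a whole page could ever be marked all-frozen might apply this check to every normal tuple, roughly as sketched below; the helper is hypothetical, not part of heapam.c.

static bool
page_needs_eventual_freeze_sketch(Page page)
{
    OffsetNumber offnum,
                maxoff = PageGetMaxOffsetNumber(page);

    for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum++)
    {
        ItemId      itemid = PageGetItemId(page, offnum);

        if (!ItemIdIsNormal(itemid))
            continue;
        if (heap_tuple_needs_eventual_freeze((HeapTupleHeader)
                                             PageGetItem(page, itemid)))
            return true;
    }

    return false;
}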
    7358             : 
    7359             : /*
    7360             :  * heap_tuple_should_freeze
    7361             :  *
    7362             :  * Return value indicates whether the sibling function heap_prepare_freeze_tuple
    7363             :  * would (or should) force freezing of the heap page that contains caller's tuple.
    7364             :  * Tuple header XIDs/MXIDs < FreezeLimit/MultiXactCutoff trigger freezing.
    7365             :  * This includes (xmin, xmax, xvac) fields, as well as MultiXact member XIDs.
    7366             :  *
    7367             :  * The *NoFreezePageRelfrozenXid and *NoFreezePageRelminMxid input/output
    7368             :  * arguments help VACUUM track the oldest extant XID/MXID remaining in rel.
    7369             :  * Our working assumption is that caller won't decide to freeze this tuple.
    7370             :  * It's up to caller to only ratchet back its own top-level trackers after the
    7371             :  * point that it fully commits to not freezing the tuple/page in question.
    7372             :  */
    7373             : bool
    7374     2845852 : heap_tuple_should_freeze(HeapTupleHeader tuple,
    7375             :                          const struct VacuumCutoffs *cutoffs,
    7376             :                          TransactionId *NoFreezePageRelfrozenXid,
    7377             :                          MultiXactId *NoFreezePageRelminMxid)
    7378             : {
    7379             :     TransactionId xid;
    7380             :     MultiXactId multi;
    7381     2845852 :     bool        freeze = false;
    7382             : 
    7383             :     /* First deal with xmin */
    7384     2845852 :     xid = HeapTupleHeaderGetXmin(tuple);
    7385     2845852 :     if (TransactionIdIsNormal(xid))
    7386             :     {
    7387             :         Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
    7388     2845188 :         if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
    7389       23256 :             *NoFreezePageRelfrozenXid = xid;
    7390     2845188 :         if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
    7391       21754 :             freeze = true;
    7392             :     }
    7393             : 
    7394             :     /* Now deal with xmax */
    7395     2845852 :     xid = InvalidTransactionId;
    7396     2845852 :     multi = InvalidMultiXactId;
    7397     2845852 :     if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
    7398           4 :         multi = HeapTupleHeaderGetRawXmax(tuple);
    7399             :     else
    7400     2845848 :         xid = HeapTupleHeaderGetRawXmax(tuple);
    7401             : 
    7402     2845852 :     if (TransactionIdIsNormal(xid))
    7403             :     {
    7404             :         Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
    7405             :         /* xmax is a non-permanent XID */
    7406      486766 :         if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
    7407           4 :             *NoFreezePageRelfrozenXid = xid;
    7408      486766 :         if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
    7409           4 :             freeze = true;
    7410             :     }
    7411     2359086 :     else if (!MultiXactIdIsValid(multi))
    7412             :     {
    7413             :         /* xmax is a permanent XID or invalid MultiXactId/XID */
    7414             :     }
    7415           4 :     else if (HEAP_LOCKED_UPGRADED(tuple->t_infomask))
    7416             :     {
    7417             :         /* xmax is a pg_upgrade'd MultiXact, which can't have updater XID */
    7418           0 :         if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
    7419           0 :             *NoFreezePageRelminMxid = multi;
    7420             :         /* heap_prepare_freeze_tuple always freezes pg_upgrade'd xmax */
    7421           0 :         freeze = true;
    7422             :     }
    7423             :     else
    7424             :     {
    7425             :         /* xmax is a MultiXactId that may have an updater XID */
    7426             :         MultiXactMember *members;
    7427             :         int         nmembers;
    7428             : 
    7429             :         Assert(MultiXactIdPrecedesOrEquals(cutoffs->relminmxid, multi));
    7430           4 :         if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
    7431           4 :             *NoFreezePageRelminMxid = multi;
    7432           4 :         if (MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff))
    7433           4 :             freeze = true;
    7434             : 
    7435             :         /* need to check whether any member of the mxact is old */
    7436           4 :         nmembers = GetMultiXactIdMembers(multi, &members, false,
    7437           4 :                                          HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
    7438             : 
    7439          10 :         for (int i = 0; i < nmembers; i++)
    7440             :         {
    7441           6 :             xid = members[i].xid;
    7442             :             Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
    7443           6 :             if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
    7444           0 :                 *NoFreezePageRelfrozenXid = xid;
    7445           6 :             if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
    7446           0 :                 freeze = true;
    7447             :         }
    7448           4 :         if (nmembers > 0)
    7449           2 :             pfree(members);
    7450             :     }
    7451             : 
    7452     2845852 :     if (tuple->t_infomask & HEAP_MOVED)
    7453             :     {
    7454           0 :         xid = HeapTupleHeaderGetXvac(tuple);
    7455           0 :         if (TransactionIdIsNormal(xid))
    7456             :         {
    7457             :             Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
    7458           0 :             if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
    7459           0 :                 *NoFreezePageRelfrozenXid = xid;
    7460             :             /* heap_prepare_freeze_tuple forces xvac freezing */
    7461           0 :             freeze = true;
    7462             :         }
    7463             :     }
    7464             : 
    7465     2845852 :     return freeze;
    7466             : }
    7467             : 
    7468             : /*
    7469             :  * Maintain snapshotConflictHorizon for caller by ratcheting forward its value
    7470             :  * using any committed XIDs contained in 'tuple', an obsolescent heap tuple
    7471             :  * that caller is in the process of physically removing, e.g. via HOT pruning
    7472             :  * or index deletion.
    7473             :  *
    7474             :  * Caller must initialize its value to InvalidTransactionId, which is
    7475             :  * generally interpreted as "definitely no need for a recovery conflict".
    7476             :  * Final value must reflect all heap tuples that caller will physically remove
    7477             :  * (or remove TID references to) via its ongoing pruning/deletion operation.
    7478             :  * ResolveRecoveryConflictWithSnapshot() is passed the final value (taken from
    7479             :  * caller's WAL record) by REDO routine when it replays caller's operation.
    7480             :  */
    7481             : void
    7482     2849352 : HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple,
    7483             :                                       TransactionId *snapshotConflictHorizon)
    7484             : {
    7485     2849352 :     TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
    7486     2849352 :     TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
    7487     2849352 :     TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
    7488             : 
    7489     2849352 :     if (tuple->t_infomask & HEAP_MOVED)
    7490             :     {
    7491           0 :         if (TransactionIdPrecedes(*snapshotConflictHorizon, xvac))
    7492           0 :             *snapshotConflictHorizon = xvac;
    7493             :     }
    7494             : 
    7495             :     /*
    7496             :      * Ignore tuples inserted by an aborted transaction or if the tuple was
    7497             :      * updated/deleted by the inserting transaction.
    7498             :      *
    7499             :      * Look for a committed hint bit, or if no xmin bit is set, check clog.
    7500             :      */
    7501     2849352 :     if (HeapTupleHeaderXminCommitted(tuple) ||
    7502      194972 :         (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
    7503             :     {
    7504     5082164 :         if (xmax != xmin &&
    7505     2373610 :             TransactionIdFollows(xmax, *snapshotConflictHorizon))
    7506      166240 :             *snapshotConflictHorizon = xmax;
    7507             :     }
    7508     2849352 : }
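
As a usage illustration, a pruning or index-deletion caller might accumulate the horizon as sketched below before copying it into its WAL record; the helper name and parameters are hypothetical, not part of heapam.c.

static TransactionId
conflict_horizon_sketch(HeapTupleHeader *removed, int nremoved)
{
    TransactionId snapshotConflictHorizon = InvalidTransactionId;

    /* ratchet the horizon forward once per tuple being removed */
    for (int i = 0; i < nremoved; i++)
        HeapTupleHeaderAdvanceConflictHorizon(removed[i],
                                              &snapshotConflictHorizon);

    /* caller stores the final value in the WAL record for its operation */
    return snapshotConflictHorizon;
}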
    7509             : 
    7510             : #ifdef USE_PREFETCH
    7511             : /*
    7512             :  * Helper function for heap_index_delete_tuples.  Issues prefetch requests for
    7513             :  * prefetch_count buffers.  The prefetch_state keeps track of all the buffers
    7514             :  * we can prefetch, and which have already been prefetched; each call to this
    7515             :  * function picks up where the previous call left off.
    7516             :  *
    7517             :  * Note: we expect the deltids array to be sorted in an order that groups TIDs
    7518             :  * by heap block, with all TIDs for each block appearing together in exactly
    7519             :  * one group.
    7520             :  */
    7521             : static void
    7522       34238 : index_delete_prefetch_buffer(Relation rel,
    7523             :                              IndexDeletePrefetchState *prefetch_state,
    7524             :                              int prefetch_count)
    7525             : {
    7526       34238 :     BlockNumber cur_hblkno = prefetch_state->cur_hblkno;
    7527       34238 :     int         count = 0;
    7528             :     int         i;
    7529       34238 :     int         ndeltids = prefetch_state->ndeltids;
    7530       34238 :     TM_IndexDelete *deltids = prefetch_state->deltids;
    7531             : 
    7532     1211036 :     for (i = prefetch_state->next_item;
    7533     1184312 :          i < ndeltids && count < prefetch_count;
    7534     1176798 :          i++)
    7535             :     {
    7536     1176798 :         ItemPointer htid = &deltids[i].tid;
    7537             : 
    7538     2343188 :         if (cur_hblkno == InvalidBlockNumber ||
    7539     1166390 :             ItemPointerGetBlockNumber(htid) != cur_hblkno)
    7540             :         {
    7541       30814 :             cur_hblkno = ItemPointerGetBlockNumber(htid);
    7542       30814 :             PrefetchBuffer(rel, MAIN_FORKNUM, cur_hblkno);
    7543       30814 :             count++;
    7544             :         }
    7545             :     }
    7546             : 
    7547             :     /*
    7548             :      * Save the prefetch position so that next time we can continue from that
    7549             :      * position.
    7550             :      */
    7551       34238 :     prefetch_state->next_item = i;
    7552       34238 :     prefetch_state->cur_hblkno = cur_hblkno;
    7553       34238 : }
    7554             : #endif
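                     :
                     : /*
                     :  * Illustrative example (added for exposition, not part of heapam.c):
                     :  * heap_index_delete_tuples() below first calls this helper with the full
                     :  * prefetch distance, and then once more with prefetch_count = 1 for every
                     :  * block it actually reads.  For instance, with a prefetch distance of 2
                     :  * and sorted deltids TIDs on heap blocks 4, 4, 5, 6, 7, the initial call
                     :  * prefetches blocks 4 and 5; reading block 4 then prefetches block 6,
                     :  * reading block 5 prefetches block 7, and so on, keeping the gap between
                     :  * reads and prefetch requests roughly constant.
                     :  */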
    7555             : 
    7556             : /*
    7557             :  * Helper function for heap_index_delete_tuples.  Checks for index corruption
    7558             :  * involving an invalid TID in index AM caller's index page.
    7559             :  *
    7560             :  * This is an ideal place for these checks.  The index AM must hold a buffer
    7561             :  * lock on the index page containing the TIDs we examine here, so we don't
    7562             :  * have to worry about concurrent VACUUMs at all.  We can be sure that the
    7563             :  * index is corrupt when htid points directly to an LP_UNUSED item or
    7564             :  * heap-only tuple, which is not the case during standard index scans.
    7565             :  */
    7566             : static inline void
    7567      984896 : index_delete_check_htid(TM_IndexDeleteOp *delstate,
    7568             :                         Page page, OffsetNumber maxoff,
    7569             :                         ItemPointer htid, TM_IndexStatus *istatus)
    7570             : {
    7571      984896 :     OffsetNumber indexpagehoffnum = ItemPointerGetOffsetNumber(htid);
    7572             :     ItemId      iid;
    7573             : 
    7574             :     Assert(OffsetNumberIsValid(istatus->idxoffnum));
    7575             : 
    7576      984896 :     if (unlikely(indexpagehoffnum > maxoff))
    7577           0 :         ereport(ERROR,
    7578             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    7579             :                  errmsg_internal("heap tid from index tuple (%u,%u) points past end of heap page line pointer array at offset %u of block %u in index \"%s\"",
    7580             :                                  ItemPointerGetBlockNumber(htid),
    7581             :                                  indexpagehoffnum,
    7582             :                                  istatus->idxoffnum, delstate->iblknum,
    7583             :                                  RelationGetRelationName(delstate->irel))));
    7584             : 
    7585      984896 :     iid = PageGetItemId(page, indexpagehoffnum);
    7586      984896 :     if (unlikely(!ItemIdIsUsed(iid)))
    7587           0 :         ereport(ERROR,
    7588             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    7589             :                  errmsg_internal("heap tid from index tuple (%u,%u) points to unused heap page item at offset %u of block %u in index \"%s\"",
    7590             :                                  ItemPointerGetBlockNumber(htid),
    7591             :                                  indexpagehoffnum,
    7592             :                                  istatus->idxoffnum, delstate->iblknum,
    7593             :                                  RelationGetRelationName(delstate->irel))));
    7594             : 
    7595      984896 :     if (ItemIdHasStorage(iid))
    7596             :     {
    7597             :         HeapTupleHeader htup;
    7598             : 
    7599             :         Assert(ItemIdIsNormal(iid));
    7600      579300 :         htup = (HeapTupleHeader) PageGetItem(page, iid);
    7601             : 
    7602      579300 :         if (unlikely(HeapTupleHeaderIsHeapOnly(htup)))
    7603           0 :             ereport(ERROR,
    7604             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    7605             :                      errmsg_internal("heap tid from index tuple (%u,%u) points to heap-only tuple at offset %u of block %u in index \"%s\"",
    7606             :                                      ItemPointerGetBlockNumber(htid),
    7607             :                                      indexpagehoffnum,
    7608             :                                      istatus->idxoffnum, delstate->iblknum,
    7609             :                                      RelationGetRelationName(delstate->irel))));
    7610             :     }
    7611      984896 : }
    7612             : 
    7613             : /*
    7614             :  * heapam implementation of tableam's index_delete_tuples interface.
    7615             :  *
    7616             :  * This helper function is called by index AMs during index tuple deletion.
    7617             :  * See tableam header comments for an explanation of the interface implemented
    7618             :  * here and a general theory of operation.  Note that each call here is either
    7619             :  * a simple index deletion call, or a bottom-up index deletion call.
    7620             :  *
    7621             :  * It's possible for this to generate a fair amount of I/O, since we may be
    7622             :  * deleting hundreds of tuples from a single index block.  To amortize that
    7623             :  * cost to some degree, this uses prefetching and combines repeat accesses to
    7624             :  * the same heap block.
    7625             :  */
    7626             : TransactionId
    7627       10408 : heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
    7628             : {
    7629             :     /* Initial assumption is that earlier pruning took care of conflict */
    7630       10408 :     TransactionId snapshotConflictHorizon = InvalidTransactionId;
    7631       10408 :     BlockNumber blkno = InvalidBlockNumber;
    7632       10408 :     Buffer      buf = InvalidBuffer;
    7633       10408 :     Page        page = NULL;
    7634       10408 :     OffsetNumber maxoff = InvalidOffsetNumber;
    7635             :     TransactionId priorXmax;
    7636             : #ifdef USE_PREFETCH
    7637             :     IndexDeletePrefetchState prefetch_state;
    7638             :     int         prefetch_distance;
    7639             : #endif
    7640             :     SnapshotData SnapshotNonVacuumable;
    7641       10408 :     int         finalndeltids = 0,
    7642       10408 :                 nblocksaccessed = 0;
    7643             : 
    7644             :     /* State that's only used in bottom-up index deletion case */
    7645       10408 :     int         nblocksfavorable = 0;
    7646       10408 :     int         curtargetfreespace = delstate->bottomupfreespace,
    7647       10408 :                 lastfreespace = 0,
    7648       10408 :                 actualfreespace = 0;
    7649       10408 :     bool        bottomup_final_block = false;
    7650             : 
    7651       10408 :     InitNonVacuumableSnapshot(SnapshotNonVacuumable, GlobalVisTestFor(rel));
    7652             : 
    7653             :     /* Sort caller's deltids array by TID for further processing */
    7654       10408 :     index_delete_sort(delstate);
    7655             : 
    7656             :     /*
    7657             :      * Bottom-up case: resort deltids array in an order attuned to where the
    7658             :      * greatest number of promising TIDs are to be found, and determine how
    7659             :      * many blocks from the start of sorted array should be considered
    7660             :      * favorable.  This will also shrink the deltids array in order to
    7661             :      * eliminate completely unfavorable blocks up front.
    7662             :      */
    7663       10408 :     if (delstate->bottomup)
    7664        3572 :         nblocksfavorable = bottomup_sort_and_shrink(delstate);
    7665             : 
    7666             : #ifdef USE_PREFETCH
    7667             :     /* Initialize prefetch state. */
    7668       10408 :     prefetch_state.cur_hblkno = InvalidBlockNumber;
    7669       10408 :     prefetch_state.next_item = 0;
    7670       10408 :     prefetch_state.ndeltids = delstate->ndeltids;
    7671       10408 :     prefetch_state.deltids = delstate->deltids;
    7672             : 
    7673             :     /*
    7674             :      * Determine the prefetch distance that we will attempt to maintain.
    7675             :      *
    7676             :      * Since the caller holds a buffer lock somewhere in rel, we'd better make
    7677             :      * sure that isn't a catalog relation before we call code that does
    7678             :      * syscache lookups, to avoid risk of deadlock.
    7679             :      */
    7680       10408 :     if (IsCatalogRelation(rel))
    7681        7162 :         prefetch_distance = maintenance_io_concurrency;
    7682             :     else
    7683             :         prefetch_distance =
    7684        3246 :             get_tablespace_maintenance_io_concurrency(rel->rd_rel->reltablespace);
    7685             : 
    7686             :     /* Cap initial prefetch distance for bottom-up deletion caller */
    7687       10408 :     if (delstate->bottomup)
    7688             :     {
    7689             :         Assert(nblocksfavorable >= 1);
    7690             :         Assert(nblocksfavorable <= BOTTOMUP_MAX_NBLOCKS);
    7691        3572 :         prefetch_distance = Min(prefetch_distance, nblocksfavorable);
    7692             :     }
    7693             : 
    7694             :     /* Start prefetching. */
    7695       10408 :     index_delete_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
    7696             : #endif
    7697             : 
    7698             :     /* Iterate over deltids, determine which to delete, check their horizon */
    7699             :     Assert(delstate->ndeltids > 0);
    7700      995304 :     for (int i = 0; i < delstate->ndeltids; i++)
    7701             :     {
    7702      988468 :         TM_IndexDelete *ideltid = &delstate->deltids[i];
    7703      988468 :         TM_IndexStatus *istatus = delstate->status + ideltid->id;
    7704      988468 :         ItemPointer htid = &ideltid->tid;
    7705             :         OffsetNumber offnum;
    7706             : 
    7707             :         /*
    7708             :          * Read buffer, and perform required extra steps each time a new block
    7709             :          * is encountered.  Avoid refetching if it's the same block as the one
    7710             :          * from the last htid.
    7711             :          */
    7712     1966528 :         if (blkno == InvalidBlockNumber ||
    7713      978060 :             ItemPointerGetBlockNumber(htid) != blkno)
    7714             :         {
    7715             :             /*
    7716             :              * Consider giving up early for bottom-up index deletion caller
    7717             :              * first. (Only prefetch next-next block afterwards, when it
    7718             :              * becomes clear that we're at least going to access the next
    7719             :              * block in line.)
    7720             :              *
    7721             :              * Sometimes the first block frees so much space for bottom-up
    7722             :              * caller that the deletion process can end without accessing any
    7723             :              * more blocks.  It is usually necessary to access 2 or 3 blocks
    7724             :              * per bottom-up deletion operation, though.
    7725             :              */
    7726       27402 :             if (delstate->bottomup)
    7727             :             {
    7728             :                 /*
    7729             :                  * We often allow caller to delete a few additional items
    7730             :                  * whose entries we reached after the point that space target
    7731             :                  * from caller was satisfied.  The cost of accessing the page
    7732             :                  * was already paid at that point, so it made sense to finish
    7733             :                  * it off.  When that happened, we finalize everything here
    7734             :                  * (by finishing off the whole bottom-up deletion operation
    7735             :                  * without needlessly paying the cost of accessing any more
    7736             :                  * blocks).
    7737             :                  */
    7738        7764 :                 if (bottomup_final_block)
    7739         288 :                     break;
    7740             : 
    7741             :                 /*
    7742             :                  * Give up when we didn't enable our caller to free any
    7743             :                  * additional space as a result of processing the page that we
    7744             :                  * just finished up with.  This rule is the main way in which
    7745             :                  * we keep the cost of bottom-up deletion under control.
    7746             :                  */
    7747        7476 :                 if (nblocksaccessed >= 1 && actualfreespace == lastfreespace)
    7748        3284 :                     break;
    7749        4192 :                 lastfreespace = actualfreespace;    /* for next time */
    7750             : 
    7751             :                 /*
    7752             :                  * Deletion operation (which is bottom-up) will definitely
    7753             :                  * access the next block in line.  Prepare for that now.
    7754             :                  *
    7755             :                  * Decay target free space so that we don't hang on for too
    7756             :                  * long with a marginal case. (Space target is only truly
    7757             :                  * helpful when it allows us to recognize that we don't need
    7758             :                  * to access more than 1 or 2 blocks to satisfy caller due to
    7759             :                  * agreeable workload characteristics.)
    7760             :                  *
    7761             :                  * We are a bit more patient when we encounter contiguous
    7762             :                  * blocks, though: these are treated as favorable blocks.  The
    7763             :                  * decay process is only applied when the next block in line
    7764             :                  * is not a favorable/contiguous block.  This is not an
    7765             :                  * exception to the general rule; we still insist on finding
    7766             :                  * at least one deletable item per block accessed.  See
    7767             :                  * bottomup_nblocksfavorable() for full details of the theory
    7768             :                  * behind favorable blocks and heap block locality in general.
    7769             :                  *
    7770             :                  * Note: The first block in line is always treated as a
    7771             :                  * favorable block, so the earliest possible point that the
    7772             :                  * decay can be applied is just before we access the second
    7773             :                  * block in line.  The Assert() verifies this for us.
    7774             :                  */
    7775             :                 Assert(nblocksaccessed > 0 || nblocksfavorable > 0);
    7776        4192 :                 if (nblocksfavorable > 0)
    7777        3846 :                     nblocksfavorable--;
    7778             :                 else
    7779         346 :                     curtargetfreespace /= 2;
    7780             :             }
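                     :
                     :             /*
                     :              * Illustrative example (added for exposition, hypothetical
                     :              * numbers): suppose the caller's bottomupfreespace target is
                     :              * 1024 bytes.  While the next block in line is still favorable
                     :              * the target stays at 1024; the first time a non-favorable
                     :              * block comes up, the decay above halves it to 512, then 256,
                     :              * and so on.  Meanwhile actualfreespace accumulates
                     :              * istatus->freespace for each newly deletable entry, and the
                     :              * pass is marked final as soon as it reaches the (possibly
                     :              * decayed) target.
                     :              */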
    7781             : 
    7782             :             /* release old buffer */
    7783       23830 :             if (BufferIsValid(buf))
    7784       13422 :                 UnlockReleaseBuffer(buf);
    7785             : 
    7786       23830 :             blkno = ItemPointerGetBlockNumber(htid);
    7787       23830 :             buf = ReadBuffer(rel, blkno);
    7788       23830 :             nblocksaccessed++;
    7789             :             Assert(!delstate->bottomup ||
    7790             :                    nblocksaccessed <= BOTTOMUP_MAX_NBLOCKS);
    7791             : 
    7792             : #ifdef USE_PREFETCH
    7793             : 
    7794             :             /*
    7795             :              * To maintain the prefetch distance, prefetch one more page for
    7796             :              * each page we read.
    7797             :              */
    7798       23830 :             index_delete_prefetch_buffer(rel, &prefetch_state, 1);
    7799             : #endif
    7800             : 
    7801       23830 :             LockBuffer(buf, BUFFER_LOCK_SHARE);
    7802             : 
    7803       23830 :             page = BufferGetPage(buf);
    7804       23830 :             maxoff = PageGetMaxOffsetNumber(page);
    7805             :         }
    7806             : 
    7807             :         /*
    7808             :          * In passing, detect index corruption involving an index page with a
    7809             :          * TID that points to a location in the heap that couldn't possibly be
    7810             :          * correct.  We only do this with actual TIDs from caller's index page
    7811             :          * (not items reached by traversing through a HOT chain).
    7812             :          */
    7813      984896 :         index_delete_check_htid(delstate, page, maxoff, htid, istatus);
    7814             : 
    7815      984896 :         if (istatus->knowndeletable)
    7816             :             Assert(!delstate->bottomup && !istatus->promising);
    7817             :         else
    7818             :         {
    7819      731024 :             ItemPointerData tmp = *htid;
    7820             :             HeapTupleData heapTuple;
    7821             : 
    7822             :             /* Are any tuples from this HOT chain non-vacuumable? */
    7823      731024 :             if (heap_hot_search_buffer(&tmp, rel, buf, &SnapshotNonVacuumable,
    7824             :                                        &heapTuple, NULL, true))
    7825      442088 :                 continue;       /* can't delete entry */
    7826             : 
    7827             :             /* Caller will delete, since whole HOT chain is vacuumable */
    7828      288936 :             istatus->knowndeletable = true;
    7829             : 
    7830             :             /* Maintain index free space info for bottom-up deletion case */
    7831      288936 :             if (delstate->bottomup)
    7832             :             {
    7833             :                 Assert(istatus->freespace > 0);
    7834       16436 :                 actualfreespace += istatus->freespace;
    7835       16436 :                 if (actualfreespace >= curtargetfreespace)
    7836        4576 :                     bottomup_final_block = true;
    7837             :             }
    7838             :         }
    7839             : 
    7840             :         /*
    7841             :          * Maintain snapshotConflictHorizon value for deletion operation as a
    7842             :          * whole by advancing current value using heap tuple headers.  This is
    7843             :          * loosely based on the logic for pruning a HOT chain.
    7844             :          */
    7845      542808 :         offnum = ItemPointerGetOffsetNumber(htid);
    7846      542808 :         priorXmax = InvalidTransactionId;   /* cannot check first XMIN */
    7847             :         for (;;)
    7848       39714 :         {
    7849             :             ItemId      lp;
    7850             :             HeapTupleHeader htup;
    7851             : 
    7852             :             /* Sanity check (pure paranoia) */
    7853      582522 :             if (offnum < FirstOffsetNumber)
    7854           0 :                 break;
    7855             : 
    7856             :             /*
    7857             :              * An offset past the end of the page's line pointer array is possible
    7858             :              * when the array was truncated
    7859             :              */
    7860      582522 :             if (offnum > maxoff)
    7861           0 :                 break;
    7862             : 
    7863      582522 :             lp = PageGetItemId(page, offnum);
    7864      582522 :             if (ItemIdIsRedirected(lp))
    7865             :             {
    7866       17786 :                 offnum = ItemIdGetRedirect(lp);
    7867       17786 :                 continue;
    7868             :             }
    7869             : 
    7870             :             /*
    7871             :              * We'll often encounter LP_DEAD line pointers (especially with an
    7872             :              * entry marked knowndeletable by our caller up front).  No heap
    7873             :              * tuple headers get examined for an htid that leads us to an
    7874             :              * LP_DEAD item.  This is okay because the earlier pruning
    7875             :              * operation that made the line pointer LP_DEAD in the first place
    7876             :              * must have considered the original tuple header as part of
    7877             :              * generating its own snapshotConflictHorizon value.
    7878             :              *
    7879             :              * Relying on XLOG_HEAP2_PRUNE_VACUUM_SCAN records like this is
    7880             :              * the same strategy that index vacuuming uses in all cases. Index
    7881             :              * VACUUM WAL records don't even have a snapshotConflictHorizon
    7882             :              * field of their own for this reason.
    7883             :              */
    7884      564736 :             if (!ItemIdIsNormal(lp))
    7885      363036 :                 break;
    7886             : 
    7887      201700 :             htup = (HeapTupleHeader) PageGetItem(page, lp);
    7888             : 
    7889             :             /*
    7890             :              * Check the tuple XMIN against prior XMAX, if any
    7891             :              */
    7892      223628 :             if (TransactionIdIsValid(priorXmax) &&
    7893       21928 :                 !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
    7894           0 :                 break;
    7895             : 
    7896      201700 :             HeapTupleHeaderAdvanceConflictHorizon(htup,
    7897             :                                                   &snapshotConflictHorizon);
    7898             : 
    7899             :             /*
    7900             :              * If the tuple is not HOT-updated, then we are at the end of this
    7901             :              * HOT-chain.  No need to visit later tuples from the same update
    7902             :              * chain (they get their own index entries) -- just move on to
    7903             :              * next htid from index AM caller.
    7904             :              */
    7905      201700 :             if (!HeapTupleHeaderIsHotUpdated(htup))
    7906             :                 break;
    7907             : 
    7908             :             /* Advance to next HOT chain member */
    7909             :             Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blkno);
    7910       21928 :             offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
    7911       21928 :             priorXmax = HeapTupleHeaderGetUpdateXid(htup);
    7912             :         }
    7913             : 
    7914             :         /* Enable further/final shrinking of deltids for caller */
    7915      542808 :         finalndeltids = i + 1;
    7916             :     }
    7917             : 
    7918       10408 :     UnlockReleaseBuffer(buf);
    7919             : 
    7920             :     /*
    7921             :      * Shrink deltids array to exclude non-deletable entries at the end.  This
    7922             :      * is not just a minor optimization.  Final deltids array size might be
    7923             :      * zero for a bottom-up caller.  Index AM is explicitly allowed to rely on
    7924             :      * ndeltids being zero in all cases with zero total deletable entries.
    7925             :      */
    7926             :     Assert(finalndeltids > 0 || delstate->bottomup);
    7927       10408 :     delstate->ndeltids = finalndeltids;
    7928             : 
    7929       10408 :     return snapshotConflictHorizon;
    7930             : }
    7931             : 
    7932             : /*
    7933             :  * Specialized inlineable comparison function for index_delete_sort()
    7934             :  */
    7935             : static inline int
    7936    23640418 : index_delete_sort_cmp(TM_IndexDelete *deltid1, TM_IndexDelete *deltid2)
    7937             : {
    7938    23640418 :     ItemPointer tid1 = &deltid1->tid;
    7939    23640418 :     ItemPointer tid2 = &deltid2->tid;
    7940             : 
    7941             :     {
    7942    23640418 :         BlockNumber blk1 = ItemPointerGetBlockNumber(tid1);
    7943    23640418 :         BlockNumber blk2 = ItemPointerGetBlockNumber(tid2);
    7944             : 
    7945    23640418 :         if (blk1 != blk2)
    7946     9708598 :             return (blk1 < blk2) ? -1 : 1;
    7947             :     }
    7948             :     {
    7949    13931820 :         OffsetNumber pos1 = ItemPointerGetOffsetNumber(tid1);
    7950    13931820 :         OffsetNumber pos2 = ItemPointerGetOffsetNumber(tid2);
    7951             : 
    7952    13931820 :         if (pos1 != pos2)
    7953    13931820 :             return (pos1 < pos2) ? -1 : 1;
    7954             :     }
    7955             : 
    7956             :     Assert(false);
    7957             : 
    7958           0 :     return 0;
    7959             : }
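                     :
                     : /*
                     :  * Illustrative example (added for exposition): under this comparator, TID
                     :  * (block 2, offset 9) sorts before (block 3, offset 1) because the block
                     :  * numbers differ, while (block 2, offset 5) sorts before (block 2, offset 9)
                     :  * on the offset tiebreak.  Two fully equal TIDs are never expected among
                     :  * deltids, which is what the Assert(false) above reflects.
                     :  */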
    7960             : 
    7961             : /*
    7962             :  * Sort deltids array from delstate by TID.  This prepares it for further
    7963             :  * processing by heap_index_delete_tuples().
    7964             :  *
    7965             :  * This operation becomes a noticeable consumer of CPU cycles with some
    7966             :  * workloads, so we go to the trouble of specialization/micro optimization.
    7967             :  * We use shellsort for this because it's easy to specialize, compiles to
    7968             :  * relatively few instructions, and is adaptive to presorted inputs/subsets
    7969             :  * (which are typical here).
    7970             :  */
    7971             : static void
    7972       10408 : index_delete_sort(TM_IndexDeleteOp *delstate)
    7973             : {
    7974       10408 :     TM_IndexDelete *deltids = delstate->deltids;
    7975       10408 :     int         ndeltids = delstate->ndeltids;
    7976       10408 :     int         low = 0;
    7977             : 
    7978             :     /*
    7979             :      * Shellsort gap sequence (taken from Sedgewick-Incerpi paper).
    7980             :      *
    7981             :      * This implementation is fast with array sizes up to ~4500.  This covers
    7982             :      * all supported BLCKSZ values.
    7983             :      */
    7984       10408 :     const int   gaps[9] = {1968, 861, 336, 112, 48, 21, 7, 3, 1};
    7985             : 
    7986             :     /* Think carefully before changing anything here -- keep swaps cheap */
    7987             :     StaticAssertDecl(sizeof(TM_IndexDelete) <= 8,
    7988             :                      "element size exceeds 8 bytes");
    7989             : 
    7990      104080 :     for (int g = 0; g < lengthof(gaps); g++)
    7991             :     {
    7992    14155512 :         for (int hi = gaps[g], i = low + hi; i < ndeltids; i++)
    7993             :         {
    7994    14061840 :             TM_IndexDelete d = deltids[i];
    7995    14061840 :             int         j = i;
    7996             : 
    7997    24314610 :             while (j >= hi && index_delete_sort_cmp(&deltids[j - hi], &d) >= 0)
    7998             :             {
    7999    10252770 :                 deltids[j] = deltids[j - hi];
    8000    10252770 :                 j -= hi;
    8001             :             }
    8002    14061840 :             deltids[j] = d;
    8003             :         }
    8004             :     }
    8005       10408 : }
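                     :
                     : /*
                     :  * Illustrative note (added for exposition): each pass of the loop above is
                     :  * an insertion sort over elements that lie "hi" positions apart.  By the
                     :  * time the final gap-1 pass runs, the earlier passes have left the array
                     :  * nearly sorted, so the inner while loop usually moves elements only a
                     :  * short distance -- this is what makes shellsort adaptive to the largely
                     :  * presorted inputs described in the comment above.
                     :  */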
    8006             : 
    8007             : /*
    8008             :  * Returns how many blocks should be considered favorable/contiguous for a
    8009             :  * bottom-up index deletion pass.  This is a number of heap blocks that starts
    8010             :  * from and includes the first block in line.
    8011             :  *
    8012             :  * There is always at least one favorable block during bottom-up index
    8013             :  * deletion.  In the worst case (i.e. with totally random heap blocks) the
    8014             :  * first block in line (the only favorable block) can be thought of as a
    8015             :  * degenerate array of contiguous blocks that consists of a single block.
    8016             :  * heap_index_delete_tuples() will expect this.
    8017             :  *
    8018             :  * Caller passes blockgroups, a description of the final order that deltids
    8019             :  * will be sorted in for heap_index_delete_tuples() bottom-up index deletion
    8020             :  * processing.  Note that deltids need not actually be sorted just yet (caller
    8021             :  * only passes deltids to us so that we can interpret blockgroups).
    8022             :  *
    8023             :  * You might guess that the existence of contiguous blocks cannot matter much,
    8024             :  * since in general the main factor that determines which blocks we visit is
    8025             :  * the number of promising TIDs, which is a fixed hint from the index AM.
    8026             :  * We're not really targeting the general case, though -- the actual goal is
    8027             :  * to adapt our behavior to a wide variety of naturally occurring conditions.
    8028             :  * The effects of most of the heuristics we apply are only noticeable in the
    8029             :  * aggregate, over time and across many _related_ bottom-up index deletion
    8030             :  * passes.
    8031             :  *
    8032             :  * Deeming certain blocks favorable allows heapam to recognize and adapt to
    8033             :  * workloads where heap blocks visited during bottom-up index deletion can be
    8034             :  * accessed contiguously, in the sense that each newly visited block is the
    8035             :  * neighbor of the block that bottom-up deletion just finished processing (or
    8036             :  * close enough to it).  It will likely be cheaper to access more favorable
    8037             :  * blocks sooner rather than later (e.g. in this pass, not across a series of
    8038             :  * related bottom-up passes).  Either way it is probably only a matter of time
    8039             :  * (or a matter of further correlated version churn) before all blocks that
    8040             :  * appear together as a single large batch of favorable blocks get accessed by
    8041             :  * _some_ bottom-up pass.  Large batches of favorable blocks tend to either
    8042             :  * appear almost constantly or not even once (it all depends on per-index
    8043             :  * workload characteristics).
    8044             :  *
    8045             :  * Note that the blockgroups sort order applies a power-of-two bucketing
    8046             :  * scheme that creates opportunities for contiguous groups of blocks to get
    8047             :  * batched together, at least with workloads that are naturally amenable to
    8048             :  * being driven by heap block locality.  This doesn't just enhance the spatial
    8049             :  * locality of bottom-up heap block processing in the obvious way.  It also
    8050             :  * enables temporal locality of access, since sorting by heap block number
    8051             :  * naturally tends to make the bottom-up processing order deterministic.
    8052             :  *
    8053             :  * Consider the following example to get a sense of how temporal locality
    8054             :  * might matter: There is a heap relation with several indexes, each of which
    8055             :  * is low to medium cardinality.  It is subject to constant non-HOT updates.
    8056             :  * The updates are skewed (in one part of the primary key, perhaps).  None of
    8057             :  * the indexes are logically modified by the UPDATE statements (if they were
    8058             :  * then bottom-up index deletion would not be triggered in the first place).
    8059             :  * Naturally, each new round of index tuples (for each heap tuple that gets a
    8060             :  * heap_update() call) will have the same heap TID in each and every index.
    8061             :  * Since these indexes are low cardinality and never get logically modified,
    8062             :  * heapam processing during bottom-up deletion passes will access heap blocks
    8063             :  * in approximately sequential order.  Temporal locality of access occurs due
    8064             :  * to bottom-up deletion passes behaving very similarly across each of the
    8065             :  * indexes at any given moment.  This keeps the number of buffer misses needed
    8066             :  * to visit heap blocks to a minimum.
    8067             :  */
    8068             : static int
    8069        3572 : bottomup_nblocksfavorable(IndexDeleteCounts *blockgroups, int nblockgroups,
    8070             :                           TM_IndexDelete *deltids)
    8071             : {
    8072        3572 :     int64       lastblock = -1;
    8073        3572 :     int         nblocksfavorable = 0;
    8074             : 
    8075             :     Assert(nblockgroups >= 1);
    8076             :     Assert(nblockgroups <= BOTTOMUP_MAX_NBLOCKS);
    8077             : 
    8078             :     /*
    8079             :      * We tolerate heap blocks that will be accessed only slightly out of
    8080             :      * physical order.  Small blips occur when a pair of almost-contiguous
    8081             :      * blocks happen to fall into different buckets (perhaps due only to a
    8082             :      * small difference in npromisingtids that the bucketing scheme didn't
    8083             :      * quite manage to ignore).  We effectively ignore these blips by applying
    8084             :      * a small tolerance.  The precise tolerance we use is a little arbitrary,
    8085             :      * but it works well enough in practice.
    8086             :      */
    8087       11102 :     for (int b = 0; b < nblockgroups; b++)
    8088             :     {
    8089       10672 :         IndexDeleteCounts *group = blockgroups + b;
    8090       10672 :         TM_IndexDelete *firstdtid = deltids + group->ifirsttid;
    8091       10672 :         BlockNumber block = ItemPointerGetBlockNumber(&firstdtid->tid);
    8092             : 
    8093       10672 :         if (lastblock != -1 &&
    8094        7100 :             ((int64) block < lastblock - BOTTOMUP_TOLERANCE_NBLOCKS ||
    8095        6022 :              (int64) block > lastblock + BOTTOMUP_TOLERANCE_NBLOCKS))
    8096             :             break;
    8097             : 
    8098        7530 :         nblocksfavorable++;
    8099        7530 :         lastblock = block;
    8100             :     }
    8101             : 
    8102             :     /* Always indicate that there is at least 1 favorable block */
    8103             :     Assert(nblocksfavorable >= 1);
    8104             : 
    8105        3572 :     return nblocksfavorable;
    8106             : }
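                     :
                     : /*
                     :  * Illustrative example (added for exposition): if the sorted block groups
                     :  * start at heap blocks 200, 201, 202, and 950, the first three groups all
                     :  * count as favorable -- each begins within the small
                     :  * BOTTOMUP_TOLERANCE_NBLOCKS window of the previous group's block -- and
                     :  * the loop above stops at 950, so 3 would be returned.
                     :  */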
    8107             : 
    8108             : /*
    8109             :  * qsort comparison function for bottomup_sort_and_shrink()
    8110             :  */
    8111             : static int
    8112      379812 : bottomup_sort_and_shrink_cmp(const void *arg1, const void *arg2)
    8113             : {
    8114      379812 :     const IndexDeleteCounts *group1 = (const IndexDeleteCounts *) arg1;
    8115      379812 :     const IndexDeleteCounts *group2 = (const IndexDeleteCounts *) arg2;
    8116             : 
    8117             :     /*
    8118             :      * Most significant field is npromisingtids (which we invert the order of
    8119             :      * so as to sort in desc order).
    8120             :      *
    8121             :      * Caller should have already normalized npromisingtids fields into
    8122             :      * power-of-two values (buckets).
    8123             :      */
    8124      379812 :     if (group1->npromisingtids > group2->npromisingtids)
    8125       16168 :         return -1;
    8126      363644 :     if (group1->npromisingtids < group2->npromisingtids)
    8127       19880 :         return 1;
    8128             : 
    8129             :     /*
    8130             :      * Tiebreak: desc ntids sort order.
    8131             :      *
    8132             :      * We cannot expect power-of-two values for ntids fields.  We should
    8133             :      * behave as if they were already rounded up for us instead.
    8134             :      */
    8135      343764 :     if (group1->ntids != group2->ntids)
    8136             :     {
    8137      245340 :         uint32      ntids1 = pg_nextpower2_32((uint32) group1->ntids);
    8138      245340 :         uint32      ntids2 = pg_nextpower2_32((uint32) group2->ntids);
    8139             : 
    8140      245340 :         if (ntids1 > ntids2)
    8141       37236 :             return -1;
    8142      208104 :         if (ntids1 < ntids2)
    8143       45582 :             return 1;
    8144             :     }
    8145             : 
    8146             :     /*
    8147             :      * Tiebreak: asc offset-into-deltids-for-block (offset to first TID for
    8148             :      * block in deltids array) order.
    8149             :      *
    8150             :      * This is equivalent to sorting in ascending heap block number order
    8151             :      * (among otherwise equal subsets of the array).  This approach allows us
    8152             :      * to avoid accessing the out-of-line TID.  (We rely on the assumption
    8153             :      * that the deltids array was sorted in ascending heap TID order when
    8154             :      * these offsets to the first TID from each heap block group were formed.)
    8155             :      */
    8156      260946 :     if (group1->ifirsttid > group2->ifirsttid)
    8157      129430 :         return 1;
    8158      131516 :     if (group1->ifirsttid < group2->ifirsttid)
    8159      131516 :         return -1;
    8160             : 
    8161           0 :     pg_unreachable();
    8162             : 
    8163             :     return 0;
    8164             : }
    8165             : 
    8166             : /*
    8167             :  * heap_index_delete_tuples() helper function for bottom-up deletion callers.
    8168             :  *
    8169             :  * Sorts deltids array in the order needed for useful processing by bottom-up
    8170             :  * deletion.  The array should already be sorted in TID order when we're
    8171             :  * called.  The sort process groups heap TIDs from deltids into heap block
    8172             :  * groupings.  Earlier/more-promising groups/blocks are usually those that are
    8173             :  * known to have the most "promising" TIDs.
    8174             :  *
    8175             :  * Sets new size of deltids array (ndeltids) in state.  deltids will only have
    8176             :  * TIDs from the BOTTOMUP_MAX_NBLOCKS most promising heap blocks when we
    8177             :  * return.  This often means that deltids will be shrunk to a small fraction
    8178             :  * of its original size (we eliminate many heap blocks from consideration for
    8179             :  * caller up front).
    8180             :  *
    8181             :  * Returns the number of "favorable" blocks.  See bottomup_nblocksfavorable()
    8182             :  * for a definition and full details.
    8183             :  */
    8184             : static int
    8185        3572 : bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
    8186             : {
    8187             :     IndexDeleteCounts *blockgroups;
    8188             :     TM_IndexDelete *reordereddeltids;
    8189        3572 :     BlockNumber curblock = InvalidBlockNumber;
    8190        3572 :     int         nblockgroups = 0;
    8191        3572 :     int         ncopied = 0;
    8192        3572 :     int         nblocksfavorable = 0;
    8193             : 
    8194             :     Assert(delstate->bottomup);
    8195             :     Assert(delstate->ndeltids > 0);
    8196             : 
    8197             :     /* Calculate per-heap-block count of TIDs */
    8198        3572 :     blockgroups = palloc(sizeof(IndexDeleteCounts) * delstate->ndeltids);
    8199     1760672 :     for (int i = 0; i < delstate->ndeltids; i++)
    8200             :     {
    8201     1757100 :         TM_IndexDelete *ideltid = &delstate->deltids[i];
    8202     1757100 :         TM_IndexStatus *istatus = delstate->status + ideltid->id;
    8203     1757100 :         ItemPointer htid = &ideltid->tid;
    8204     1757100 :         bool        promising = istatus->promising;
    8205             : 
    8206     1757100 :         if (curblock != ItemPointerGetBlockNumber(htid))
    8207             :         {
    8208             :             /* New block group */
    8209       71748 :             nblockgroups++;
    8210             : 
    8211             :             Assert(curblock < ItemPointerGetBlockNumber(htid) ||
    8212             :                    !BlockNumberIsValid(curblock));
    8213             : 
    8214       71748 :             curblock = ItemPointerGetBlockNumber(htid);
    8215       71748 :             blockgroups[nblockgroups - 1].ifirsttid = i;
    8216       71748 :             blockgroups[nblockgroups - 1].ntids = 1;
    8217       71748 :             blockgroups[nblockgroups - 1].npromisingtids = 0;
    8218             :         }
    8219             :         else
    8220             :         {
    8221     1685352 :             blockgroups[nblockgroups - 1].ntids++;
    8222             :         }
    8223             : 
    8224     1757100 :         if (promising)
    8225      217256 :             blockgroups[nblockgroups - 1].npromisingtids++;
    8226             :     }
    8227             : 
    8228             :     /*
    8229             :      * We're about ready to sort block groups to determine the optimal order
    8230             :      * for visiting heap blocks.  But before we do, round the number of
    8231             :      * promising tuples for each block group up to the next power-of-two,
    8232             :      * unless it is very low (less than 4), in which case we round up to 4.
    8233             :      * npromisingtids is far too noisy to trust when choosing between a pair
    8234             :      * of block groups that both have very low values.
    8235             :      *
    8236             :      * This scheme divides heap blocks/block groups into buckets.  Each bucket
    8237             :      * contains blocks that have _approximately_ the same number of promising
    8238             :      * TIDs as each other.  The goal is to ignore relatively small differences
    8239             :      * in the total number of promising entries, so that the whole process can
    8240             :      * give a little weight to heapam factors (like heap block locality)
    8241             :      * instead.  This isn't a trade-off, really -- we have nothing to lose. It
    8242             :      * would be foolish to interpret small differences in npromisingtids
    8243             :      * values as anything more than noise.
    8244             :      *
    8245             :      * We tiebreak on nhtids when sorting block group subsets that have the
    8246             :      * same npromisingtids, but this has the same issues as npromisingtids,
    8247             :      * and so nhtids is subject to the same power-of-two bucketing scheme. The
    8248             :      * only reason that we don't fix nhtids in the same way here too is that
    8249             :      * we'll need accurate nhtids values after the sort.  We handle nhtids
    8250             :      * bucketization dynamically instead (in the sort comparator).
    8251             :      *
    8252             :      * See bottomup_nblocksfavorable() for a full explanation of when and how
    8253             :      * heap locality/favorable blocks can significantly influence when and how
    8254             :      * heap blocks are accessed.
    8255             :      */
    8256       75320 :     for (int b = 0; b < nblockgroups; b++)
    8257             :     {
    8258       71748 :         IndexDeleteCounts *group = blockgroups + b;
    8259             : 
    8260             :         /* Better off falling back on nhtids with low npromisingtids */
    8261       71748 :         if (group->npromisingtids <= 4)
    8262       61662 :             group->npromisingtids = 4;
    8263             :         else
    8264       10086 :             group->npromisingtids =
    8265       10086 :                 pg_nextpower2_32((uint32) group->npromisingtids);
    8266             :     }
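                     :
                     :     /*
                     :      * Illustrative example (added for exposition): after this rounding,
                     :      * block groups with npromisingtids of 1, 3, and 4 all compare as 4, a
                     :      * group with 5 becomes 8, and one with 17 becomes 32.  Small differences
                     :      * in promising-TID counts therefore no longer influence the qsort below;
                     :      * the ntids and ifirsttid tiebreaks decide the order within each bucket.
                     :      */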
    8267             : 
    8268             :     /* Sort groups and rearrange caller's deltids array */
    8269        3572 :     qsort(blockgroups, nblockgroups, sizeof(IndexDeleteCounts),
    8270             :           bottomup_sort_and_shrink_cmp);
    8271        3572 :     reordereddeltids = palloc(delstate->ndeltids * sizeof(TM_IndexDelete));
    8272             : 
    8273        3572 :     nblockgroups = Min(BOTTOMUP_MAX_NBLOCKS, nblockgroups);
    8274             :     /* Determine number of favorable blocks at the start of final deltids */
    8275        3572 :     nblocksfavorable = bottomup_nblocksfavorable(blockgroups, nblockgroups,
    8276             :                                                  delstate->deltids);
    8277             : 
    8278       23786 :     for (int b = 0; b < nblockgroups; b++)
    8279             :     {
    8280       20214 :         IndexDeleteCounts *group = blockgroups + b;
    8281       20214 :         TM_IndexDelete *firstdtid = delstate->deltids + group->ifirsttid;
    8282             : 
    8283       20214 :         memcpy(reordereddeltids + ncopied, firstdtid,
    8284       20214 :                sizeof(TM_IndexDelete) * group->ntids);
    8285       20214 :         ncopied += group->ntids;
    8286             :     }
    8287             : 
    8288             :     /* Copy final grouped and sorted TIDs back into start of caller's array */
    8289        3572 :     memcpy(delstate->deltids, reordereddeltids,
    8290             :            sizeof(TM_IndexDelete) * ncopied);
    8291        3572 :     delstate->ndeltids = ncopied;
    8292             : 
    8293        3572 :     pfree(reordereddeltids);
    8294        3572 :     pfree(blockgroups);
    8295             : 
    8296        3572 :     return nblocksfavorable;
    8297             : }
    8298             : 
    8299             : /*
    8300             :  * Perform XLogInsert for a heap-visible operation.  'block' is the block
    8301             :  * being marked all-visible, and vm_buffer is the buffer containing the
    8302             :  * corresponding visibility map block.  Both should have already been modified
    8303             :  * and dirtied.
    8304             :  *
    8305             :  * snapshotConflictHorizon comes from the largest xmin on the page being
    8306             :  * marked all-visible.  REDO routine uses it to generate recovery conflicts.
    8307             :  *
    8308             :  * If checksums or wal_log_hints are enabled, we may also generate a full-page
    8309             :  * image of heap_buffer. Otherwise, we optimize away the FPI (by specifying
    8310             :  * REGBUF_NO_IMAGE for the heap buffer), in which case the caller should *not*
    8311             :  * update the heap page's LSN.
    8312             :  */
    8313             : XLogRecPtr
    8314       65176 : log_heap_visible(Relation rel, Buffer heap_buffer, Buffer vm_buffer,
    8315             :                  TransactionId snapshotConflictHorizon, uint8 vmflags)
    8316             : {
    8317             :     xl_heap_visible xlrec;
    8318             :     XLogRecPtr  recptr;
    8319             :     uint8       flags;
    8320             : 
    8321             :     Assert(BufferIsValid(heap_buffer));
    8322             :     Assert(BufferIsValid(vm_buffer));
    8323             : 
    8324       65176 :     xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
    8325       65176 :     xlrec.flags = vmflags;
    8326       65176 :     if (RelationIsAccessibleInLogicalDecoding(rel))
    8327         256 :         xlrec.flags |= VISIBILITYMAP_XLOG_CATALOG_REL;
    8328       65176 :     XLogBeginInsert();
    8329       65176 :     XLogRegisterData((char *) &xlrec, SizeOfHeapVisible);
    8330             : 
    8331       65176 :     XLogRegisterBuffer(0, vm_buffer, 0);
    8332             : 
    8333       65176 :     flags = REGBUF_STANDARD;
    8334       65176 :     if (!XLogHintBitIsNeeded())
    8335       53290 :         flags |= REGBUF_NO_IMAGE;
    8336       65176 :     XLogRegisterBuffer(1, heap_buffer, flags);
    8337             : 
    8338       65176 :     recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE);
    8339             : 
    8340       65176 :     return recptr;
    8341             : }
    8342             : 
    8343             : /*
    8344             :  * Perform XLogInsert for a heap-update operation.  Caller must already
    8345             :  * have modified the buffer(s) and marked them dirty.
    8346             :  */
    8347             : static XLogRecPtr
    8348      543936 : log_heap_update(Relation reln, Buffer oldbuf,
    8349             :                 Buffer newbuf, HeapTuple oldtup, HeapTuple newtup,
    8350             :                 HeapTuple old_key_tuple,
    8351             :                 bool all_visible_cleared, bool new_all_visible_cleared)
    8352             : {
    8353             :     xl_heap_update xlrec;
    8354             :     xl_heap_header xlhdr;
    8355             :     xl_heap_header xlhdr_idx;
    8356             :     uint8       info;
    8357             :     uint16      prefix_suffix[2];
    8358      543936 :     uint16      prefixlen = 0,
    8359      543936 :                 suffixlen = 0;
    8360             :     XLogRecPtr  recptr;
    8361      543936 :     Page        page = BufferGetPage(newbuf);
    8362      543936 :     bool        need_tuple_data = RelationIsLogicallyLogged(reln);
    8363             :     bool        init;
    8364             :     int         bufflags;
    8365             : 
    8366             :     /* Caller should not call me on a non-WAL-logged relation */
    8367             :     Assert(RelationNeedsWAL(reln));
    8368             : 
    8369      543936 :     XLogBeginInsert();
    8370             : 
    8371      543936 :     if (HeapTupleIsHeapOnly(newtup))
    8372      260586 :         info = XLOG_HEAP_HOT_UPDATE;
    8373             :     else
    8374      283350 :         info = XLOG_HEAP_UPDATE;
    8375             : 
    8376             :     /*
    8377             :      * If the old and new tuple are on the same page, we only need to log the
    8378             :      * parts of the new tuple that were changed.  That saves on the amount of
    8379             :      * WAL we need to write.  Currently, we just count any unchanged bytes in
    8380             :      * the beginning and end of the tuple.  That's quick to check, and
    8381             :      * perfectly covers the common case that only one field is updated.
    8382             :      *
    8383             :      * We could do this even if the old and new tuple are on different pages,
    8384             :      * but only if we don't make a full-page image of the old page, which is
    8385             :      * difficult to know in advance.  Also, if the old tuple is corrupt for
    8386             :      * some reason, it would allow the corruption to propagate to the new page,
    8387             :      * so it seems best to avoid.  Under the general assumption that most
    8388             :      * updates tend to create the new tuple version on the same page, there
    8389             :      * isn't much to be gained by doing this across pages anyway.
    8390             :      *
    8391             :      * Skip this if we're taking a full-page image of the new page, as we
    8392             :      * don't include the new tuple in the WAL record in that case.  Also
    8393             :      * disable if wal_level='logical', as logical decoding needs to be able to
    8394             :      * read the new tuple in whole from the WAL record alone.
    8395             :      */
    8396      543936 :     if (oldbuf == newbuf && !need_tuple_data &&
    8397      257828 :         !XLogCheckBufferNeedsBackup(newbuf))
    8398             :     {
    8399      257276 :         char       *oldp = (char *) oldtup->t_data + oldtup->t_data->t_hoff;
    8400      257276 :         char       *newp = (char *) newtup->t_data + newtup->t_data->t_hoff;
    8401      257276 :         int         oldlen = oldtup->t_len - oldtup->t_data->t_hoff;
    8402      257276 :         int         newlen = newtup->t_len - newtup->t_data->t_hoff;
    8403             : 
    8404             :         /* Check for common prefix between old and new tuple */
    8405    19446784 :         for (prefixlen = 0; prefixlen < Min(oldlen, newlen); prefixlen++)
    8406             :         {
    8407    19404598 :             if (newp[prefixlen] != oldp[prefixlen])
    8408      215090 :                 break;
    8409             :         }
    8410             : 
    8411             :         /*
    8412             :          * Storing the length of the prefix takes 2 bytes, so we need to save
    8413             :          * at least 3 bytes or there's no point.
    8414             :          */
    8415      257276 :         if (prefixlen < 3)
    8416       44062 :             prefixlen = 0;
    8417             : 
    8418             :         /* Same for suffix */
    8419     8400316 :         for (suffixlen = 0; suffixlen < Min(oldlen, newlen) - prefixlen; suffixlen++)
    8420             :         {
    8421     8357700 :             if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
    8422      214660 :                 break;
    8423             :         }
    8424      257276 :         if (suffixlen < 3)
    8425       60862 :             suffixlen = 0;
    8426             :     }
    8427             : 
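                     :
                     :     /*
                     :      * Illustrative example (added for exposition, made-up tuple contents):
                     :      * if the bytes after t_hoff are "aaaa1111zzzz" in the old tuple and
                     :      * "aaaa2222zzzz" in the new one, the loops above compute prefixlen = 4
                     :      * and suffixlen = 4, so only the 4 changed middle bytes (plus the two
                     :      * uint16 lengths registered below) need to be included in the WAL
                     :      * record, rather than the full 12-byte payload.
                     :      */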
    8428             :     /* Prepare main WAL data chain */
    8429      543936 :     xlrec.flags = 0;
    8430      543936 :     if (all_visible_cleared)
    8431        2244 :         xlrec.flags |= XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED;
    8432      543936 :     if (new_all_visible_cleared)
    8433         962 :         xlrec.flags |= XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED;
    8434      543936 :     if (prefixlen > 0)
    8435      213214 :         xlrec.flags |= XLH_UPDATE_PREFIX_FROM_OLD;
    8436      543936 :     if (suffixlen > 0)
    8437      196414 :         xlrec.flags |= XLH_UPDATE_SUFFIX_FROM_OLD;
    8438      543936 :     if (need_tuple_data)
    8439             :     {
    8440       94016 :         xlrec.flags |= XLH_UPDATE_CONTAINS_NEW_TUPLE;
    8441       94016 :         if (old_key_tuple)
    8442             :         {
    8443         266 :             if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
    8444         112 :                 xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_TUPLE;
    8445             :             else
    8446         154 :                 xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_KEY;
    8447             :         }
    8448             :     }
    8449             : 
    8450             :     /* If the new tuple is the first and only tuple on the page... */
    8451      550170 :     if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
    8452        6234 :         PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
    8453             :     {
    8454        6084 :         info |= XLOG_HEAP_INIT_PAGE;
    8455        6084 :         init = true;
    8456             :     }
    8457             :     else
    8458      537852 :         init = false;
    8459             : 
    8460             :     /* Prepare WAL data for the old page */
    8461      543936 :     xlrec.old_offnum = ItemPointerGetOffsetNumber(&oldtup->t_self);
    8462      543936 :     xlrec.old_xmax = HeapTupleHeaderGetRawXmax(oldtup->t_data);
    8463     1087872 :     xlrec.old_infobits_set = compute_infobits(oldtup->t_data->t_infomask,
    8464      543936 :                                               oldtup->t_data->t_infomask2);
    8465             : 
    8466             :     /* Prepare WAL data for the new page */
    8467      543936 :     xlrec.new_offnum = ItemPointerGetOffsetNumber(&newtup->t_self);
    8468      543936 :     xlrec.new_xmax = HeapTupleHeaderGetRawXmax(newtup->t_data);
    8469             : 
    8470      543936 :     bufflags = REGBUF_STANDARD;
    8471      543936 :     if (init)
    8472        6084 :         bufflags |= REGBUF_WILL_INIT;
    8473      543936 :     if (need_tuple_data)
    8474       94016 :         bufflags |= REGBUF_KEEP_DATA;
    8475             : 
    8476      543936 :     XLogRegisterBuffer(0, newbuf, bufflags);
    8477      543936 :     if (oldbuf != newbuf)
    8478      262252 :         XLogRegisterBuffer(1, oldbuf, REGBUF_STANDARD);
    8479             : 
    8480      543936 :     XLogRegisterData((char *) &xlrec, SizeOfHeapUpdate);
    8481             : 
    8482             :     /*
    8483             :      * Prepare WAL data for the new tuple.
    8484             :      */
    8485      543936 :     if (prefixlen > 0 || suffixlen > 0)
    8486             :     {
    8487      256414 :         if (prefixlen > 0 && suffixlen > 0)
    8488             :         {
    8489      153214 :             prefix_suffix[0] = prefixlen;
    8490      153214 :             prefix_suffix[1] = suffixlen;
    8491      153214 :             XLogRegisterBufData(0, (char *) &prefix_suffix, sizeof(uint16) * 2);
    8492             :         }
    8493      103200 :         else if (prefixlen > 0)
    8494             :         {
    8495       60000 :             XLogRegisterBufData(0, (char *) &prefixlen, sizeof(uint16));
    8496             :         }
    8497             :         else
    8498             :         {
    8499       43200 :             XLogRegisterBufData(0, (char *) &suffixlen, sizeof(uint16));
    8500             :         }
    8501             :     }
    8502             : 
    8503      543936 :     xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
    8504      543936 :     xlhdr.t_infomask = newtup->t_data->t_infomask;
    8505      543936 :     xlhdr.t_hoff = newtup->t_data->t_hoff;
    8506             :     Assert(SizeofHeapTupleHeader + prefixlen + suffixlen <= newtup->t_len);
    8507             : 
    8508             :     /*
    8509             :      * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
    8510             :      *
    8511             :      * The 'data' doesn't include the common prefix or suffix.
    8512             :      */
    8513      543936 :     XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
    8514      543936 :     if (prefixlen == 0)
    8515             :     {
    8516      330722 :         XLogRegisterBufData(0,
    8517      330722 :                             ((char *) newtup->t_data) + SizeofHeapTupleHeader,
    8518      330722 :                             newtup->t_len - SizeofHeapTupleHeader - suffixlen);
    8519             :     }
    8520             :     else
    8521             :     {
    8522             :         /*
    8523             :          * Have to write the null bitmap and data after the common prefix as
    8524             :          * two separate rdata entries.
    8525             :          */
    8526             :         /* bitmap [+ padding] [+ oid] */
    8527      213214 :         if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
    8528             :         {
    8529      213214 :             XLogRegisterBufData(0,
    8530      213214 :                                 ((char *) newtup->t_data) + SizeofHeapTupleHeader,
    8531      213214 :                                 newtup->t_data->t_hoff - SizeofHeapTupleHeader);
    8532             :         }
    8533             : 
    8534             :         /* data after common prefix */
    8535      213214 :         XLogRegisterBufData(0,
    8536      213214 :                             ((char *) newtup->t_data) + newtup->t_data->t_hoff + prefixlen,
    8537      213214 :                             newtup->t_len - newtup->t_data->t_hoff - prefixlen - suffixlen);
    8538             :     }
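                     : 
                     :     /*
                     :      * To summarize the chain built above: the data registered for buffer 0
                     :      * is, in order, the prefix/suffix length word(s) (if any), the
                     :      * xl_heap_header, and then the tuple contents after the fixed header,
                     :      * minus the common prefix and suffix (registered as one or two chunks
                     :      * depending on whether a prefix was found).
                     :      */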
    8539             : 
    8540             :     /* We need to log a tuple identity */
    8541      543936 :     if (need_tuple_data && old_key_tuple)
    8542             :     {
    8543             :         /* don't really need this, but it's more convenient to decode */
    8544         266 :         xlhdr_idx.t_infomask2 = old_key_tuple->t_data->t_infomask2;
    8545         266 :         xlhdr_idx.t_infomask = old_key_tuple->t_data->t_infomask;
    8546         266 :         xlhdr_idx.t_hoff = old_key_tuple->t_data->t_hoff;
    8547             : 
    8548         266 :         XLogRegisterData((char *) &xlhdr_idx, SizeOfHeapHeader);
    8549             : 
    8550             :         /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
    8551         266 :         XLogRegisterData((char *) old_key_tuple->t_data + SizeofHeapTupleHeader,
    8552         266 :                          old_key_tuple->t_len - SizeofHeapTupleHeader);
    8553             :     }
    8554             : 
    8555             :     /* filtering by origin on a row level is much more efficient */
    8556      543936 :     XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    8557             : 
    8558      543936 :     recptr = XLogInsert(RM_HEAP_ID, info);
    8559             : 
    8560      543936 :     return recptr;
    8561             : }
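                     : 
                     : /*
                     :  * A minimal sketch (with illustrative variable names, not the exact code
                     :  * of heap_xlog_update) of how redo reassembles the new tuple from the
                     :  * pieces registered above: the prefix and suffix are copied from the old
                     :  * tuple version on the page, and only the middle comes from the record.
                     :  *
                     :  *     memcpy(newp, (char *) oldtup.t_data + oldtup.t_data->t_hoff, prefixlen);
                     :  *     newp += prefixlen;
                     :  *     memcpy(newp, recdata, tuplen);
                     :  *     newp += tuplen;
                     :  *     memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);
                     :  */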
    8562             : 
    8563             : /*
    8564             :  * Perform XLogInsert of an XLOG_HEAP2_NEW_CID record
    8565             :  *
    8566             :  * This is only used in wal_level >= WAL_LEVEL_LOGICAL, and only for catalog
    8567             :  * tuples.
    8568             :  */
    8569             : static XLogRecPtr
    8570       43778 : log_heap_new_cid(Relation relation, HeapTuple tup)
    8571             : {
    8572             :     xl_heap_new_cid xlrec;
    8573             : 
    8574             :     XLogRecPtr  recptr;
    8575       43778 :     HeapTupleHeader hdr = tup->t_data;
    8576             : 
    8577             :     Assert(ItemPointerIsValid(&tup->t_self));
    8578             :     Assert(tup->t_tableOid != InvalidOid);
    8579             : 
    8580       43778 :     xlrec.top_xid = GetTopTransactionId();
    8581       43778 :     xlrec.target_locator = relation->rd_locator;
    8582       43778 :     xlrec.target_tid = tup->t_self;
    8583             : 
    8584             :     /*
    8585             :      * If the tuple got inserted & deleted in the same TX we definitely have a
    8586             :      * combo CID, so set both cmin and cmax.
    8587             :      */
    8588       43778 :     if (hdr->t_infomask & HEAP_COMBOCID)
    8589             :     {
    8590             :         Assert(!(hdr->t_infomask & HEAP_XMAX_INVALID));
    8591             :         Assert(!HeapTupleHeaderXminInvalid(hdr));
    8592        3950 :         xlrec.cmin = HeapTupleHeaderGetCmin(hdr);
    8593        3950 :         xlrec.cmax = HeapTupleHeaderGetCmax(hdr);
    8594        3950 :         xlrec.combocid = HeapTupleHeaderGetRawCommandId(hdr);
    8595             :     }
    8596             :     /* No combo CID, so only cmin or cmax can be set by this TX */
    8597             :     else
    8598             :     {
    8599             :         /*
    8600             :          * Tuple inserted.
    8601             :          *
    8602             :          * We need to check for LOCK ONLY because multixacts might be
    8603             :          * transferred to the new tuple in case of FOR KEY SHARE updates, in
    8604             :          * which case there will be an xmax even though the tuple was just
    8605             :          * inserted.
    8606             :          */
    8607       39828 :         if (hdr->t_infomask & HEAP_XMAX_INVALID ||
    8608       11902 :             HEAP_XMAX_IS_LOCKED_ONLY(hdr->t_infomask))
    8609             :         {
    8610       27928 :             xlrec.cmin = HeapTupleHeaderGetRawCommandId(hdr);
    8611       27928 :             xlrec.cmax = InvalidCommandId;
    8612             :         }
    8613             :         /* Tuple from a different tx updated or deleted. */
    8614             :         else
    8615             :         {
    8616       11900 :             xlrec.cmin = InvalidCommandId;
    8617       11900 :             xlrec.cmax = HeapTupleHeaderGetRawCommandId(hdr);
    8618             :         }
    8619       39828 :         xlrec.combocid = InvalidCommandId;
    8620             :     }
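                     : 
                     :     /*
                     :      * Illustration with made-up command ids: a tuple inserted at cmin = 2
                     :      * and deleted at cmax = 5 by the same transaction carries a combo CID,
                     :      * so both values (plus the combo CID itself) are logged above; otherwise
                     :      * only one of cmin/cmax is valid and the other is InvalidCommandId.
                     :      */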
    8621             : 
    8622             :     /*
    8623             :      * Note that we don't need to register the buffer here, because this
    8624             :      * operation does not modify the page. The insert/update/delete that
    8625             :      * called us certainly did, but that's WAL-logged separately.
    8626             :      */
    8627       43778 :     XLogBeginInsert();
    8628       43778 :     XLogRegisterData((char *) &xlrec, SizeOfHeapNewCid);
    8629             : 
    8630             :     /* will be looked at irrespective of origin */
    8631             : 
    8632       43778 :     recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_NEW_CID);
    8633             : 
    8634       43778 :     return recptr;
    8635             : }
    8636             : 
    8637             : /*
    8638             :  * Build a heap tuple representing the configured REPLICA IDENTITY, to be
    8639             :  * logged as the old tuple in an UPDATE or DELETE.
    8640             :  *
    8641             :  * Returns NULL if there's no need to log an identity or if there's no suitable
    8642             :  * key defined.
    8643             :  *
    8644             :  * Pass key_required true if any replica identity columns changed value, or if
    8645             :  * any of them have any external data.  Delete must always pass true.
    8646             :  *
    8647             :  * *copy is set to true if the returned tuple is a modified copy rather than
    8648             :  * the same tuple that was passed in.
    8649             :  */
    8650             : static HeapTuple
    8651     3321284 : ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
    8652             :                        bool *copy)
    8653             : {
    8654     3321284 :     TupleDesc   desc = RelationGetDescr(relation);
    8655     3321284 :     char        replident = relation->rd_rel->relreplident;
    8656             :     Bitmapset  *idattrs;
    8657             :     HeapTuple   key_tuple;
    8658             :     bool        nulls[MaxHeapAttributeNumber];
    8659             :     Datum       values[MaxHeapAttributeNumber];
    8660             : 
    8661     3321284 :     *copy = false;
    8662             : 
    8663     3321284 :     if (!RelationIsLogicallyLogged(relation))
    8664     3120764 :         return NULL;
    8665             : 
    8666      200520 :     if (replident == REPLICA_IDENTITY_NOTHING)
    8667         462 :         return NULL;
    8668             : 
    8669      200058 :     if (replident == REPLICA_IDENTITY_FULL)
    8670             :     {
    8671             :         /*
    8672             :          * When logging the entire old tuple, it very well could contain
    8673             :          * toasted columns. If so, force them to be inlined.
    8674             :          */
    8675         354 :         if (HeapTupleHasExternal(tp))
    8676             :         {
    8677           8 :             *copy = true;
    8678           8 :             tp = toast_flatten_tuple(tp, desc);
    8679             :         }
    8680         354 :         return tp;
    8681             :     }
    8682             : 
    8683             :     /* if the key isn't required and we're only logging the key, we're done */
    8684      199704 :     if (!key_required)
    8685       93750 :         return NULL;
    8686             : 
    8687             :     /* find out the replica identity columns */
    8688      105954 :     idattrs = RelationGetIndexAttrBitmap(relation,
    8689             :                                          INDEX_ATTR_BITMAP_IDENTITY_KEY);
    8690             : 
    8691             :     /*
    8692             :      * If there's no defined replica identity columns, treat as !key_required.
    8693             :      * (This case should not be reachable from heap_update, since that should
    8694             :      * calculate key_required accurately.  But heap_delete just passes
    8695             :      * constant true for key_required, so we can hit this case in deletes.)
    8696             :      */
    8697      105954 :     if (bms_is_empty(idattrs))
    8698       12042 :         return NULL;
    8699             : 
    8700             :     /*
    8701             :      * Construct a new tuple containing only the replica identity columns,
    8702             :      * with nulls elsewhere.  While we're at it, assert that the replica
    8703             :      * identity columns aren't null.
    8704             :      */
    8705       93912 :     heap_deform_tuple(tp, desc, values, nulls);
    8706             : 
    8707      301722 :     for (int i = 0; i < desc->natts; i++)
    8708             :     {
    8709      207810 :         if (bms_is_member(i + 1 - FirstLowInvalidHeapAttributeNumber,
    8710             :                           idattrs))
    8711             :             Assert(!nulls[i]);
    8712             :         else
    8713      113880 :             nulls[i] = true;
    8714             :     }
    8715             : 
    8716       93912 :     key_tuple = heap_form_tuple(desc, values, nulls);
    8717       93912 :     *copy = true;
    8718             : 
    8719       93912 :     bms_free(idattrs);
    8720             : 
    8721             :     /*
    8722             :      * If the tuple, which by now only contains indexed columns, still has
    8723             :      * toasted columns, force them to be inlined. This is somewhat unlikely
    8724             :      * since there are limits on the size of indexed columns, so we don't
    8725             :      * duplicate toast_flatten_tuple()'s functionality in the above loop over
    8726             :      * the indexed columns, even if it would be more efficient.
    8727             :      */
    8728       93912 :     if (HeapTupleHasExternal(key_tuple))
    8729             :     {
    8730           8 :         HeapTuple   oldtup = key_tuple;
    8731             : 
    8732           8 :         key_tuple = toast_flatten_tuple(oldtup, desc);
    8733           8 :         heap_freetuple(oldtup);
    8734             :     }
    8735             : 
    8736       93912 :     return key_tuple;
    8737             : }
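                     : 
                     : /*
                     :  * Typical use, shown schematically with illustrative variable names
                     :  * (heap_delete always passes true for key_required, while heap_update
                     :  * computes it from the changed columns):
                     :  *
                     :  *     old_key_tuple = ExtractReplicaIdentity(relation, &tp,
                     :  *                                            true, &old_key_copied);
                     :  */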
    8738             : 
    8739             : /*
    8740             :  * Replay XLOG_HEAP2_PRUNE_* records.
    8741             :  */
    8742             : static void
    8743       16326 : heap_xlog_prune_freeze(XLogReaderState *record)
    8744             : {
    8745       16326 :     XLogRecPtr  lsn = record->EndRecPtr;
    8746       16326 :     char       *maindataptr = XLogRecGetData(record);
    8747             :     xl_heap_prune xlrec;
    8748             :     Buffer      buffer;
    8749             :     RelFileLocator rlocator;
    8750             :     BlockNumber blkno;
    8751             :     XLogRedoAction action;
    8752             : 
    8753       16326 :     XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
    8754       16326 :     memcpy(&xlrec, maindataptr, SizeOfHeapPrune);
    8755       16326 :     maindataptr += SizeOfHeapPrune;
    8756             : 
    8757             :     /*
    8758             :      * We will take an ordinary exclusive lock or a cleanup lock depending on
    8759             :      * whether the XLHP_CLEANUP_LOCK flag is set.  With an ordinary exclusive
    8760             :      * lock, we had better not be doing anything that requires moving existing
    8761             :      * tuple data.
    8762             :      */
    8763             :     Assert((xlrec.flags & XLHP_CLEANUP_LOCK) != 0 ||
    8764             :            (xlrec.flags & (XLHP_HAS_REDIRECTIONS | XLHP_HAS_DEAD_ITEMS)) == 0);
    8765             : 
    8766             :     /*
    8767             :      * We are about to remove and/or freeze tuples.  In Hot Standby mode,
    8768             :      * ensure that there are no queries running for which the removed tuples
    8769             :      * are still visible or which still consider the frozen xids as running.
    8770             :      * The conflict horizon XID comes after xl_heap_prune.
    8771             :      */
    8772       16326 :     if ((xlrec.flags & XLHP_HAS_CONFLICT_HORIZON) != 0)
    8773             :     {
    8774             :         TransactionId snapshot_conflict_horizon;
    8775             : 
    8776             :         /* memcpy() because snapshot_conflict_horizon is stored unaligned */
    8777       12846 :         memcpy(&snapshot_conflict_horizon, maindataptr, sizeof(TransactionId));
    8778       12846 :         maindataptr += sizeof(TransactionId);
    8779             : 
    8780       12846 :         if (InHotStandby)
    8781       12466 :             ResolveRecoveryConflictWithSnapshot(snapshot_conflict_horizon,
    8782       12466 :                                                 (xlrec.flags & XLHP_IS_CATALOG_REL) != 0,
    8783             :                                                 rlocator);
    8784             :     }
    8785             : 
    8786             :     /*
    8787             :      * If we have a full-page image, restore it and we're done.
    8788             :      */
    8789       16326 :     action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL,
    8790       16326 :                                            (xlrec.flags & XLHP_CLEANUP_LOCK) != 0,
    8791             :                                            &buffer);
    8792       16326 :     if (action == BLK_NEEDS_REDO)
    8793             :     {
    8794       14748 :         Page        page = (Page) BufferGetPage(buffer);
    8795             :         OffsetNumber *redirected;
    8796             :         OffsetNumber *nowdead;
    8797             :         OffsetNumber *nowunused;
    8798             :         int         nredirected;
    8799             :         int         ndead;
    8800             :         int         nunused;
    8801             :         int         nplans;
    8802             :         Size        datalen;
    8803             :         xlhp_freeze_plan *plans;
    8804             :         OffsetNumber *frz_offsets;
    8805       14748 :         char       *dataptr = XLogRecGetBlockData(record, 0, &datalen);
    8806             : 
    8807       14748 :         heap_xlog_deserialize_prune_and_freeze(dataptr, xlrec.flags,
    8808             :                                                &nplans, &plans, &frz_offsets,
    8809             :                                                &nredirected, &redirected,
    8810             :                                                &ndead, &nowdead,
    8811             :                                                &nunused, &nowunused);
    8812             : 
    8813             :         /*
    8814             :          * Update all line pointers per the record, and repair fragmentation
    8815             :          * if needed.
    8816             :          */
    8817       14748 :         if (nredirected > 0 || ndead > 0 || nunused > 0)
    8818       14678 :             heap_page_prune_execute(buffer,
    8819       14678 :                                     (xlrec.flags & XLHP_CLEANUP_LOCK) == 0,
    8820             :                                     redirected, nredirected,
    8821             :                                     nowdead, ndead,
    8822             :                                     nowunused, nunused);
    8823             : 
    8824             :         /* Freeze tuples */
    8825       14900 :         for (int p = 0; p < nplans; p++)
    8826             :         {
    8827             :             HeapTupleFreeze frz;
    8828             : 
    8829             :             /*
    8830             :              * Convert freeze plan representation from WAL record into
    8831             :              * per-tuple format used by heap_execute_freeze_tuple
    8832             :              */
    8833         152 :             frz.xmax = plans[p].xmax;
    8834         152 :             frz.t_infomask2 = plans[p].t_infomask2;
    8835         152 :             frz.t_infomask = plans[p].t_infomask;
    8836         152 :             frz.frzflags = plans[p].frzflags;
    8837         152 :             frz.offset = InvalidOffsetNumber;   /* unused, but be tidy */
    8838             : 
    8839        3806 :             for (int i = 0; i < plans[p].ntuples; i++)
    8840             :             {
    8841        3654 :                 OffsetNumber offset = *(frz_offsets++);
    8842             :                 ItemId      lp;
    8843             :                 HeapTupleHeader tuple;
    8844             : 
    8845        3654 :                 lp = PageGetItemId(page, offset);
    8846        3654 :                 tuple = (HeapTupleHeader) PageGetItem(page, lp);
    8847        3654 :                 heap_execute_freeze_tuple(tuple, &frz);
    8848             :             }
    8849             :         }
    8850             : 
    8851             :         /* There should be no more data */
    8852             :         Assert((char *) frz_offsets == dataptr + datalen);
    8853             : 
    8854             :         /*
    8855             :          * Note: we don't worry about updating the page's prunability hints.
    8856             :          * At worst this will cause an extra prune cycle to occur soon.
    8857             :          */
    8858             : 
    8859       14748 :         PageSetLSN(page, lsn);
    8860       14748 :         MarkBufferDirty(buffer);
    8861             :     }
    8862             : 
    8863             :     /*
    8864             :      * If we released any space or line pointers, update the free space map.
    8865             :      *
    8866             :      * Do this regardless of a full-page image being applied, since the FSM
    8867             :      * data is not in the page anyway.
    8868             :      */
    8869       16326 :     if (BufferIsValid(buffer))
    8870             :     {
    8871       16326 :         if (xlrec.flags & (XLHP_HAS_REDIRECTIONS |
    8872             :                            XLHP_HAS_DEAD_ITEMS |
    8873             :                            XLHP_HAS_NOW_UNUSED_ITEMS))
    8874             :         {
    8875       16256 :             Size        freespace = PageGetHeapFreeSpace(BufferGetPage(buffer));
    8876             : 
    8877       16256 :             UnlockReleaseBuffer(buffer);
    8878             : 
    8879       16256 :             XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
    8880             :         }
    8881             :         else
    8882          70 :             UnlockReleaseBuffer(buffer);
    8883             :     }
    8884       16326 : }
    8885             : 
    8886             : /*
    8887             :  * Replay XLOG_HEAP2_VISIBLE record.
    8888             :  *
    8889             :  * The critical integrity requirement here is that we must never end up with
    8890             :  * a situation where the visibility map bit is set, and the page-level
    8891             :  * PD_ALL_VISIBLE bit is clear.  If that were to occur, then a subsequent
    8892             :  * page modification would fail to clear the visibility map bit.
    8893             :  */
    8894             : static void
    8895        7366 : heap_xlog_visible(XLogReaderState *record)
    8896             : {
    8897        7366 :     XLogRecPtr  lsn = record->EndRecPtr;
    8898        7366 :     xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record);
    8899        7366 :     Buffer      vmbuffer = InvalidBuffer;
    8900             :     Buffer      buffer;
    8901             :     Page        page;
    8902             :     RelFileLocator rlocator;
    8903             :     BlockNumber blkno;
    8904             :     XLogRedoAction action;
    8905             : 
    8906             :     Assert((xlrec->flags & VISIBILITYMAP_XLOG_VALID_BITS) == xlrec->flags);
    8907             : 
    8908        7366 :     XLogRecGetBlockTag(record, 1, &rlocator, NULL, &blkno);
    8909             : 
    8910             :     /*
    8911             :      * If there are any Hot Standby transactions running that have an xmin
    8912             :      * horizon old enough that this page isn't all-visible for them, they
    8913             :      * might incorrectly decide that an index-only scan can skip a heap fetch.
    8914             :      *
    8915             :      * NB: It might be better to throw some kind of "soft" conflict here that
    8916             :      * forces any index-only scan that is in flight to perform heap fetches,
    8917             :      * rather than killing the transaction outright.
    8918             :      */
    8919        7366 :     if (InHotStandby)
    8920        7018 :         ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
    8921        7018 :                                             xlrec->flags & VISIBILITYMAP_XLOG_CATALOG_REL,
    8922             :                                             rlocator);
    8923             : 
    8924             :     /*
    8925             :      * Read the heap page, if it still exists. If the heap file has been
    8926             :      * dropped or truncated later in recovery, we don't need to update the
    8927             :      * page, but we'd better still update the visibility map.
    8928             :      */
    8929        7366 :     action = XLogReadBufferForRedo(record, 1, &buffer);
    8930        7366 :     if (action == BLK_NEEDS_REDO)
    8931             :     {
    8932             :         /*
    8933             :          * We don't bump the LSN of the heap page when setting the visibility
    8934             :          * map bit (unless checksums or wal_log_hints is enabled, in which
    8935             :          * case we must). This exposes us to torn page hazards, but since
    8936             :          * we're not inspecting the existing page contents in any way, we
    8937             :          * don't care.
    8938             :          */
    8939        5116 :         page = BufferGetPage(buffer);
    8940             : 
    8941        5116 :         PageSetAllVisible(page);
    8942             : 
    8943        5116 :         if (XLogHintBitIsNeeded())
    8944        4936 :             PageSetLSN(page, lsn);
    8945             : 
    8946        5116 :         MarkBufferDirty(buffer);
    8947             :     }
    8948             :     else if (action == BLK_RESTORED)
    8949             :     {
    8950             :         /*
    8951             :          * If heap block was backed up, we already restored it and there's
    8952             :          * nothing more to do. (This can only happen with checksums or
    8953             :          * wal_log_hints enabled.)
    8954             :          */
    8955             :     }
    8956             : 
    8957        7366 :     if (BufferIsValid(buffer))
    8958             :     {
    8959        7276 :         Size        space = PageGetFreeSpace(BufferGetPage(buffer));
    8960             : 
    8961        7276 :         UnlockReleaseBuffer(buffer);
    8962             : 
    8963             :         /*
    8964             :          * Since FSM is not WAL-logged and only updated heuristically, it
    8965             :          * easily becomes stale in standbys.  If the standby is later promoted
    8966             :          * and runs VACUUM, it will skip updating individual free space
    8967             :          * figures for pages that became all-visible (or all-frozen, depending
    8968             :          * on the vacuum mode), which is troublesome when FreeSpaceMapVacuum
    8969             :          * propagates overly optimistic free space values to upper FSM layers;
    8970             :          * later inserters try to use such pages only to find out that they
    8971             :          * are unusable.  This can cause long stalls when there are many such
    8972             :          * pages.
    8973             :          *
    8974             :          * Forestall those problems by updating FSM's idea about a page that
    8975             :          * is becoming all-visible or all-frozen.
    8976             :          *
    8977             :          * Do this regardless of a full-page image being applied, since the
    8978             :          * FSM data is not in the page anyway.
    8979             :          */
    8980        7276 :         if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
    8981        7276 :             XLogRecordPageWithFreeSpace(rlocator, blkno, space);
    8982             :     }
    8983             : 
    8984             :     /*
    8985             :      * Even if we skipped the heap page update due to the LSN interlock, it's
    8986             :      * still safe to update the visibility map.  Any WAL record that clears
    8987             :      * the visibility map bit does so before checking the page LSN, so any
    8988             :      * bits that need to be cleared will still be cleared.
    8989             :      */
    8990        7366 :     if (XLogReadBufferForRedoExtended(record, 0, RBM_ZERO_ON_ERROR, false,
    8991             :                                       &vmbuffer) == BLK_NEEDS_REDO)
    8992             :     {
    8993        6950 :         Page        vmpage = BufferGetPage(vmbuffer);
    8994             :         Relation    reln;
    8995             :         uint8       vmbits;
    8996             : 
    8997             :         /* initialize the page if it was read as zeros */
    8998        6950 :         if (PageIsNew(vmpage))
    8999           0 :             PageInit(vmpage, BLCKSZ, 0);
    9000             : 
    9001             :         /* remove VISIBILITYMAP_XLOG_* */
    9002        6950 :         vmbits = xlrec->flags & VISIBILITYMAP_VALID_BITS;
    9003             : 
    9004             :         /*
    9005             :          * XLogReadBufferForRedoExtended locked the buffer. But
    9006             :          * visibilitymap_set will handle locking itself.
    9007             :          */
    9008        6950 :         LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
    9009             : 
    9010        6950 :         reln = CreateFakeRelcacheEntry(rlocator);
    9011        6950 :         visibilitymap_pin(reln, blkno, &vmbuffer);
    9012             : 
    9013        6950 :         visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
    9014             :                           xlrec->snapshotConflictHorizon, vmbits);
    9015             : 
    9016        6950 :         ReleaseBuffer(vmbuffer);
    9017        6950 :         FreeFakeRelcacheEntry(reln);
    9018             :     }
    9019         416 :     else if (BufferIsValid(vmbuffer))
    9020         416 :         UnlockReleaseBuffer(vmbuffer);
    9021        7366 : }
    9022             : 
    9023             : /*
    9024             :  * Given an "infobits" field from an XLog record, set the correct bits in the
    9025             :  * given infomask and infomask2 for the tuple touched by the record.
    9026             :  *
    9027             :  * (This is the reverse of compute_infobits).
    9028             :  */
    9029             : static void
    9030      875378 : fix_infomask_from_infobits(uint8 infobits, uint16 *infomask, uint16 *infomask2)
    9031             : {
    9032      875378 :     *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
    9033             :                    HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_EXCL_LOCK);
    9034      875378 :     *infomask2 &= ~HEAP_KEYS_UPDATED;
    9035             : 
    9036      875378 :     if (infobits & XLHL_XMAX_IS_MULTI)
    9037           4 :         *infomask |= HEAP_XMAX_IS_MULTI;
    9038      875378 :     if (infobits & XLHL_XMAX_LOCK_ONLY)
    9039      109564 :         *infomask |= HEAP_XMAX_LOCK_ONLY;
    9040      875378 :     if (infobits & XLHL_XMAX_EXCL_LOCK)
    9041      108800 :         *infomask |= HEAP_XMAX_EXCL_LOCK;
    9042             :     /* note HEAP_XMAX_SHR_LOCK isn't considered here */
    9043      875378 :     if (infobits & XLHL_XMAX_KEYSHR_LOCK)
    9044         786 :         *infomask |= HEAP_XMAX_KEYSHR_LOCK;
    9045             : 
    9046      875378 :     if (infobits & XLHL_KEYS_UPDATED)
    9047      583060 :         *infomask2 |= HEAP_KEYS_UPDATED;
    9048      875378 : }
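                     : 
                     : /*
                     :  * For example, an infobits value of XLHL_XMAX_EXCL_LOCK | XLHL_KEYS_UPDATED
                     :  * sets HEAP_XMAX_EXCL_LOCK in *infomask and HEAP_KEYS_UPDATED in *infomask2,
                     :  * after the other xmax-related lock bits have been cleared.
                     :  */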
    9049             : 
    9050             : static void
    9051      580758 : heap_xlog_delete(XLogReaderState *record)
    9052             : {
    9053      580758 :     XLogRecPtr  lsn = record->EndRecPtr;
    9054      580758 :     xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
    9055             :     Buffer      buffer;
    9056             :     Page        page;
    9057      580758 :     ItemId      lp = NULL;
    9058             :     HeapTupleHeader htup;
    9059             :     BlockNumber blkno;
    9060             :     RelFileLocator target_locator;
    9061             :     ItemPointerData target_tid;
    9062             : 
    9063      580758 :     XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
    9064      580758 :     ItemPointerSetBlockNumber(&target_tid, blkno);
    9065      580758 :     ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
    9066             : 
    9067             :     /*
    9068             :      * The visibility map may need to be fixed even if the heap page is
    9069             :      * already up-to-date.
    9070             :      */
    9071      580758 :     if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
    9072             :     {
    9073           8 :         Relation    reln = CreateFakeRelcacheEntry(target_locator);
    9074           8 :         Buffer      vmbuffer = InvalidBuffer;
    9075             : 
    9076           8 :         visibilitymap_pin(reln, blkno, &vmbuffer);
    9077           8 :         visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
    9078           8 :         ReleaseBuffer(vmbuffer);
    9079           8 :         FreeFakeRelcacheEntry(reln);
    9080             :     }
    9081             : 
    9082      580758 :     if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    9083             :     {
    9084      580548 :         page = BufferGetPage(buffer);
    9085             : 
    9086      580548 :         if (PageGetMaxOffsetNumber(page) >= xlrec->offnum)
    9087      580548 :             lp = PageGetItemId(page, xlrec->offnum);
    9088             : 
    9089      580548 :         if (PageGetMaxOffsetNumber(page) < xlrec->offnum || !ItemIdIsNormal(lp))
    9090           0 :             elog(PANIC, "invalid lp");
    9091             : 
    9092      580548 :         htup = (HeapTupleHeader) PageGetItem(page, lp);
    9093             : 
    9094      580548 :         htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    9095      580548 :         htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    9096      580548 :         HeapTupleHeaderClearHotUpdated(htup);
    9097      580548 :         fix_infomask_from_infobits(xlrec->infobits_set,
    9098             :                                    &htup->t_infomask, &htup->t_infomask2);
    9099      580548 :         if (!(xlrec->flags & XLH_DELETE_IS_SUPER))
    9100      580548 :             HeapTupleHeaderSetXmax(htup, xlrec->xmax);
    9101             :         else
    9102           0 :             HeapTupleHeaderSetXmin(htup, InvalidTransactionId);
    9103      580548 :         HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
    9104             : 
    9105             :         /* Mark the page as a candidate for pruning */
    9106      580548 :         PageSetPrunable(page, XLogRecGetXid(record));
    9107             : 
    9108      580548 :         if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
    9109           6 :             PageClearAllVisible(page);
    9110             : 
    9111             :         /* Make sure t_ctid is set correctly */
    9112      580548 :         if (xlrec->flags & XLH_DELETE_IS_PARTITION_MOVE)
    9113         264 :             HeapTupleHeaderSetMovedPartitions(htup);
    9114             :         else
    9115      580284 :             htup->t_ctid = target_tid;
    9116      580548 :         PageSetLSN(page, lsn);
    9117      580548 :         MarkBufferDirty(buffer);
    9118             :     }
    9119      580758 :     if (BufferIsValid(buffer))
    9120      580758 :         UnlockReleaseBuffer(buffer);
    9121      580758 : }
    9122             : 
    9123             : static void
    9124     2510420 : heap_xlog_insert(XLogReaderState *record)
    9125             : {
    9126     2510420 :     XLogRecPtr  lsn = record->EndRecPtr;
    9127     2510420 :     xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
    9128             :     Buffer      buffer;
    9129             :     Page        page;
    9130             :     union
    9131             :     {
    9132             :         HeapTupleHeaderData hdr;
    9133             :         char        data[MaxHeapTupleSize];
    9134             :     }           tbuf;
    9135             :     HeapTupleHeader htup;
    9136             :     xl_heap_header xlhdr;
    9137             :     uint32      newlen;
    9138     2510420 :     Size        freespace = 0;
    9139             :     RelFileLocator target_locator;
    9140             :     BlockNumber blkno;
    9141             :     ItemPointerData target_tid;
    9142             :     XLogRedoAction action;
    9143             : 
    9144     2510420 :     XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
    9145     2510420 :     ItemPointerSetBlockNumber(&target_tid, blkno);
    9146     2510420 :     ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
    9147             : 
    9148             :     /*
    9149             :      * The visibility map may need to be fixed even if the heap page is
    9150             :      * already up-to-date.
    9151             :      */
    9152     2510420 :     if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
    9153             :     {
    9154        1076 :         Relation    reln = CreateFakeRelcacheEntry(target_locator);
    9155        1076 :         Buffer      vmbuffer = InvalidBuffer;
    9156             : 
    9157        1076 :         visibilitymap_pin(reln, blkno, &vmbuffer);
    9158        1076 :         visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
    9159        1076 :         ReleaseBuffer(vmbuffer);
    9160        1076 :         FreeFakeRelcacheEntry(reln);
    9161             :     }
    9162             : 
    9163             :     /*
    9164             :      * If we inserted the first and only tuple on the page, re-initialize the
    9165             :      * page from scratch.
    9166             :      */
    9167     2510420 :     if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
    9168             :     {
    9169       32776 :         buffer = XLogInitBufferForRedo(record, 0);
    9170       32776 :         page = BufferGetPage(buffer);
    9171       32776 :         PageInit(page, BufferGetPageSize(buffer), 0);
    9172       32776 :         action = BLK_NEEDS_REDO;
    9173             :     }
    9174             :     else
    9175     2477644 :         action = XLogReadBufferForRedo(record, 0, &buffer);
    9176     2510420 :     if (action == BLK_NEEDS_REDO)
    9177             :     {
    9178             :         Size        datalen;
    9179             :         char       *data;
    9180             : 
    9181     2509154 :         page = BufferGetPage(buffer);
    9182             : 
    9183     2509154 :         if (PageGetMaxOffsetNumber(page) + 1 < xlrec->offnum)
    9184           0 :             elog(PANIC, "invalid max offset number");
    9185             : 
    9186     2509154 :         data = XLogRecGetBlockData(record, 0, &datalen);
    9187             : 
    9188     2509154 :         newlen = datalen - SizeOfHeapHeader;
    9189             :         Assert(datalen > SizeOfHeapHeader && newlen <= MaxHeapTupleSize);
    9190     2509154 :         memcpy((char *) &xlhdr, data, SizeOfHeapHeader);
    9191     2509154 :         data += SizeOfHeapHeader;
    9192             : 
    9193     2509154 :         htup = &tbuf.hdr;
    9194     2509154 :         MemSet((char *) htup, 0, SizeofHeapTupleHeader);
    9195             :         /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
    9196     2509154 :         memcpy((char *) htup + SizeofHeapTupleHeader,
    9197             :                data,
    9198             :                newlen);
    9199     2509154 :         newlen += SizeofHeapTupleHeader;
    9200     2509154 :         htup->t_infomask2 = xlhdr.t_infomask2;
    9201     2509154 :         htup->t_infomask = xlhdr.t_infomask;
    9202     2509154 :         htup->t_hoff = xlhdr.t_hoff;
    9203     2509154 :         HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
    9204     2509154 :         HeapTupleHeaderSetCmin(htup, FirstCommandId);
    9205     2509154 :         htup->t_ctid = target_tid;
    9206             : 
    9207     2509154 :         if (PageAddItem(page, (Item) htup, newlen, xlrec->offnum,
    9208             :                         true, true) == InvalidOffsetNumber)
    9209           0 :             elog(PANIC, "failed to add tuple");
    9210             : 
    9211     2509154 :         freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
    9212             : 
    9213     2509154 :         PageSetLSN(page, lsn);
    9214             : 
    9215     2509154 :         if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
    9216         572 :             PageClearAllVisible(page);
    9217             : 
    9218             :         /* XLH_INSERT_ALL_FROZEN_SET implies that all tuples are visible */
    9219     2509154 :         if (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)
    9220           0 :             PageSetAllVisible(page);
    9221             : 
    9222     2509154 :         MarkBufferDirty(buffer);
    9223             :     }
    9224     2510420 :     if (BufferIsValid(buffer))
    9225     2510420 :         UnlockReleaseBuffer(buffer);
    9226             : 
    9227             :     /*
    9228             :      * If the page is running low on free space, update the FSM as well.
    9229             :      * Arbitrarily, our definition of "low" is less than 20%. We can't do much
    9230             :      * better than that without knowing the fill-factor for the table.
    9231             :      *
    9232             :      * XXX: Don't do this if the page was restored from full page image. We
    9233             :      * don't bother to update the FSM in that case, it doesn't need to be
    9234             :      * totally accurate anyway.
    9235             :      */
    9236     2510420 :     if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
    9237      493954 :         XLogRecordPageWithFreeSpace(target_locator, blkno, freespace);
    9238     2510420 : }
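                     : 
                     : /*
                     :  * With the default BLCKSZ of 8192, the "less than 20%" free-space threshold
                     :  * used above works out to 8192 / 5 = 1638 bytes.
                     :  */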
    9239             : 
    9240             : /*
    9241             :  * Handles MULTI_INSERT record type.
    9242             :  */
    9243             : static void
    9244      105266 : heap_xlog_multi_insert(XLogReaderState *record)
    9245             : {
    9246      105266 :     XLogRecPtr  lsn = record->EndRecPtr;
    9247             :     xl_heap_multi_insert *xlrec;
    9248             :     RelFileLocator rlocator;
    9249             :     BlockNumber blkno;
    9250             :     Buffer      buffer;
    9251             :     Page        page;
    9252             :     union
    9253             :     {
    9254             :         HeapTupleHeaderData hdr;
    9255             :         char        data[MaxHeapTupleSize];
    9256             :     }           tbuf;
    9257             :     HeapTupleHeader htup;
    9258             :     uint32      newlen;
    9259      105266 :     Size        freespace = 0;
    9260             :     int         i;
    9261      105266 :     bool        isinit = (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE) != 0;
    9262             :     XLogRedoAction action;
    9263             : 
    9264             :     /*
    9265             :      * Insertion doesn't overwrite MVCC data, so no conflict processing is
    9266             :      * required.
    9267             :      */
    9268      105266 :     xlrec = (xl_heap_multi_insert *) XLogRecGetData(record);
    9269             : 
    9270      105266 :     XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
    9271             : 
    9272             :     /* check that the mutually exclusive flags are not both set */
    9273             :     Assert(!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
    9274             :              (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
    9275             : 
    9276             :     /*
    9277             :      * The visibility map may need to be fixed even if the heap page is
    9278             :      * already up-to-date.
    9279             :      */
    9280      105266 :     if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
    9281             :     {
    9282         960 :         Relation    reln = CreateFakeRelcacheEntry(rlocator);
    9283         960 :         Buffer      vmbuffer = InvalidBuffer;
    9284             : 
    9285         960 :         visibilitymap_pin(reln, blkno, &vmbuffer);
    9286         960 :         visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
    9287         960 :         ReleaseBuffer(vmbuffer);
    9288         960 :         FreeFakeRelcacheEntry(reln);
    9289             :     }
    9290             : 
    9291      105266 :     if (isinit)
    9292             :     {
    9293        4124 :         buffer = XLogInitBufferForRedo(record, 0);
    9294        4124 :         page = BufferGetPage(buffer);
    9295        4124 :         PageInit(page, BufferGetPageSize(buffer), 0);
    9296        4124 :         action = BLK_NEEDS_REDO;
    9297             :     }
    9298             :     else
    9299      101142 :         action = XLogReadBufferForRedo(record, 0, &buffer);
    9300      105266 :     if (action == BLK_NEEDS_REDO)
    9301             :     {
    9302             :         char       *tupdata;
    9303             :         char       *endptr;
    9304             :         Size        len;
    9305             : 
    9306             :         /* Tuples are stored as block data */
    9307      104360 :         tupdata = XLogRecGetBlockData(record, 0, &len);
    9308      104360 :         endptr = tupdata + len;
    9309             : 
    9310      104360 :         page = (Page) BufferGetPage(buffer);
    9311             : 
    9312      505862 :         for (i = 0; i < xlrec->ntuples; i++)
    9313             :         {
    9314             :             OffsetNumber offnum;
    9315             :             xl_multi_insert_tuple *xlhdr;
    9316             : 
    9317             :             /*
    9318             :              * If we're reinitializing the page, the tuples are stored in
    9319             :              * order from FirstOffsetNumber. Otherwise there's an array of
    9320             :              * offsets in the WAL record, and the tuples come after that.
    9321             :              */
    9322      401502 :             if (isinit)
    9323      200016 :                 offnum = FirstOffsetNumber + i;
    9324             :             else
    9325      201486 :                 offnum = xlrec->offsets[i];
    9326      401502 :             if (PageGetMaxOffsetNumber(page) + 1 < offnum)
    9327           0 :                 elog(PANIC, "invalid max offset number");
    9328             : 
    9329      401502 :             xlhdr = (xl_multi_insert_tuple *) SHORTALIGN(tupdata);
    9330      401502 :             tupdata = ((char *) xlhdr) + SizeOfMultiInsertTuple;
    9331             : 
    9332      401502 :             newlen = xlhdr->datalen;
    9333             :             Assert(newlen <= MaxHeapTupleSize);
    9334      401502 :             htup = &tbuf.hdr;
    9335      401502 :             MemSet((char *) htup, 0, SizeofHeapTupleHeader);
    9336             :             /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
    9337      401502 :             memcpy((char *) htup + SizeofHeapTupleHeader,
    9338             :                    (char *) tupdata,
    9339             :                    newlen);
    9340      401502 :             tupdata += newlen;
    9341             : 
    9342      401502 :             newlen += SizeofHeapTupleHeader;
    9343      401502 :             htup->t_infomask2 = xlhdr->t_infomask2;
    9344      401502 :             htup->t_infomask = xlhdr->t_infomask;
    9345      401502 :             htup->t_hoff = xlhdr->t_hoff;
    9346      401502 :             HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
    9347      401502 :             HeapTupleHeaderSetCmin(htup, FirstCommandId);
    9348      401502 :             ItemPointerSetBlockNumber(&htup->t_ctid, blkno);
    9349      401502 :             ItemPointerSetOffsetNumber(&htup->t_ctid, offnum);
    9350             : 
    9351      401502 :             offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
    9352      401502 :             if (offnum == InvalidOffsetNumber)
    9353           0 :                 elog(PANIC, "failed to add tuple");
    9354             :         }
    9355      104360 :         if (tupdata != endptr)
    9356           0 :             elog(PANIC, "total tuple length mismatch");
    9357             : 
    9358      104360 :         freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
    9359             : 
    9360      104360 :         PageSetLSN(page, lsn);
    9361             : 
    9362      104360 :         if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
    9363         266 :             PageClearAllVisible(page);
    9364             : 
    9365             :         /* XLH_INSERT_ALL_FROZEN_SET implies that all tuples are visible */
    9366      104360 :         if (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)
    9367           8 :             PageSetAllVisible(page);
    9368             : 
    9369      104360 :         MarkBufferDirty(buffer);
    9370             :     }
    9371      105266 :     if (BufferIsValid(buffer))
    9372      105266 :         UnlockReleaseBuffer(buffer);
    9373             : 
    9374             :     /*
    9375             :      * If the page is running low on free space, update the FSM as well.
    9376             :      * Arbitrarily, our definition of "low" is less than 20%. We can't do much
    9377             :      * better than that without knowing the fill-factor for the table.
    9378             :      *
    9379             :      * XXX: Don't do this if the page was restored from full page image. We
    9380             :      * don't bother to update the FSM in that case, it doesn't need to be
    9381             :      * totally accurate anyway.
    9382             :      */
    9383      105266 :     if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
    9384       26902 :         XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
    9385      105266 : }
    9386             : 
    9387             : /*
    9388             :  * Handles UPDATE and HOT_UPDATE
    9389             :  */
    9390             : static void
    9391      185314 : heap_xlog_update(XLogReaderState *record, bool hot_update)
    9392             : {
    9393      185314 :     XLogRecPtr  lsn = record->EndRecPtr;
    9394      185314 :     xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
    9395             :     RelFileLocator rlocator;
    9396             :     BlockNumber oldblk;
    9397             :     BlockNumber newblk;
    9398             :     ItemPointerData newtid;
    9399             :     Buffer      obuffer,
    9400             :                 nbuffer;
    9401             :     Page        page;
    9402             :     OffsetNumber offnum;
    9403      185314 :     ItemId      lp = NULL;
    9404             :     HeapTupleData oldtup;
    9405             :     HeapTupleHeader htup;
    9406      185314 :     uint16      prefixlen = 0,
    9407      185314 :                 suffixlen = 0;
    9408             :     char       *newp;
    9409             :     union
    9410             :     {
    9411             :         HeapTupleHeaderData hdr;
    9412             :         char        data[MaxHeapTupleSize];
    9413             :     }           tbuf;
    9414             :     xl_heap_header xlhdr;
    9415             :     uint32      newlen;
    9416      185314 :     Size        freespace = 0;
    9417             :     XLogRedoAction oldaction;
    9418             :     XLogRedoAction newaction;
    9419             : 
    9420             :     /* initialize to keep the compiler quiet */
    9421      185314 :     oldtup.t_data = NULL;
    9422      185314 :     oldtup.t_len = 0;
    9423             : 
    9424      185314 :     XLogRecGetBlockTag(record, 0, &rlocator, NULL, &newblk);
    9425      185314 :     if (XLogRecGetBlockTagExtended(record, 1, NULL, NULL, &oldblk, NULL))
    9426             :     {
    9427             :         /* HOT updates are never done across pages */
    9428             :         Assert(!hot_update);
    9429             :     }
    9430             :     else
    9431       77360 :         oldblk = newblk;
    9432             : 
    9433      185314 :     ItemPointerSet(&newtid, newblk, xlrec->new_offnum);
    9434             : 
    9435             :     /*
    9436             :      * The visibility map may need to be fixed even if the heap page is
    9437             :      * already up-to-date.
    9438             :      */
    9439      185314 :     if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
    9440             :     {
    9441         490 :         Relation    reln = CreateFakeRelcacheEntry(rlocator);
    9442         490 :         Buffer      vmbuffer = InvalidBuffer;
    9443             : 
    9444         490 :         visibilitymap_pin(reln, oldblk, &vmbuffer);
    9445         490 :         visibilitymap_clear(reln, oldblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
    9446         490 :         ReleaseBuffer(vmbuffer);
    9447         490 :         FreeFakeRelcacheEntry(reln);
    9448             :     }
    9449             : 
    9450             :     /*
    9451             :      * In normal operation, it is important to lock the two pages in
    9452             :      * page-number order, to avoid possible deadlocks against other update
    9453             :      * operations going the other way.  However, during WAL replay there can
    9454             :      * be no other update happening, so we don't need to worry about that. But
    9455             :      * we *do* need to worry that we don't expose an inconsistent state to Hot
    9456             :      * Standby queries --- so the original page can't be unlocked before we've
    9457             :      * added the new tuple to the new page.
    9458             :      */
    9459             : 
    9460             :     /* Deal with old tuple version */
    9461      185314 :     oldaction = XLogReadBufferForRedo(record, (oldblk == newblk) ? 0 : 1,
    9462             :                                       &obuffer);
    9463      185314 :     if (oldaction == BLK_NEEDS_REDO)
    9464             :     {
    9465      185266 :         page = BufferGetPage(obuffer);
    9466      185266 :         offnum = xlrec->old_offnum;
    9467      185266 :         if (PageGetMaxOffsetNumber(page) >= offnum)
    9468      185266 :             lp = PageGetItemId(page, offnum);
    9469             : 
    9470      185266 :         if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
    9471           0 :             elog(PANIC, "invalid lp");
    9472             : 
    9473      185266 :         htup = (HeapTupleHeader) PageGetItem(page, lp);
    9474             : 
    9475      185266 :         oldtup.t_data = htup;
    9476      185266 :         oldtup.t_len = ItemIdGetLength(lp);
    9477             : 
    9478      185266 :         htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    9479      185266 :         htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    9480      185266 :         if (hot_update)
    9481       72210 :             HeapTupleHeaderSetHotUpdated(htup);
    9482             :         else
    9483      113056 :             HeapTupleHeaderClearHotUpdated(htup);
    9484      185266 :         fix_infomask_from_infobits(xlrec->old_infobits_set, &htup->t_infomask,
    9485             :                                    &htup->t_infomask2);
    9486      185266 :         HeapTupleHeaderSetXmax(htup, xlrec->old_xmax);
    9487      185266 :         HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
    9488             :         /* Set forward chain link in t_ctid */
    9489      185266 :         htup->t_ctid = newtid;
    9490             : 
    9491             :         /* Mark the page as a candidate for pruning */
    9492      185266 :         PageSetPrunable(page, XLogRecGetXid(record));
    9493             : 
    9494      185266 :         if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
    9495         486 :             PageClearAllVisible(page);
    9496             : 
    9497      185266 :         PageSetLSN(page, lsn);
    9498      185266 :         MarkBufferDirty(obuffer);
    9499             :     }
    9500             : 
    9501             :     /*
    9502             :      * Read the page the new tuple goes into, if different from old.
    9503             :      */
    9504      185314 :     if (oldblk == newblk)
    9505             :     {
    9506       77360 :         nbuffer = obuffer;
    9507       77360 :         newaction = oldaction;
    9508             :     }
    9509      107954 :     else if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
    9510             :     {
    9511        1300 :         nbuffer = XLogInitBufferForRedo(record, 0);
    9512        1300 :         page = (Page) BufferGetPage(nbuffer);
    9513        1300 :         PageInit(page, BufferGetPageSize(nbuffer), 0);
    9514        1300 :         newaction = BLK_NEEDS_REDO;
    9515             :     }
    9516             :     else
    9517      106654 :         newaction = XLogReadBufferForRedo(record, 0, &nbuffer);
    9518             : 
    9519             :     /*
    9520             :      * The visibility map may need to be fixed even if the heap page is
    9521             :      * already up-to-date.
    9522             :      */
    9523      185314 :     if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
    9524             :     {
    9525          74 :         Relation    reln = CreateFakeRelcacheEntry(rlocator);
    9526          74 :         Buffer      vmbuffer = InvalidBuffer;
    9527             : 
    9528          74 :         visibilitymap_pin(reln, newblk, &vmbuffer);
    9529          74 :         visibilitymap_clear(reln, newblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
    9530          74 :         ReleaseBuffer(vmbuffer);
    9531          74 :         FreeFakeRelcacheEntry(reln);
    9532             :     }
    9533             : 
    9534             :     /* Deal with new tuple */
    9535      185314 :     if (newaction == BLK_NEEDS_REDO)
    9536             :     {
    9537             :         char       *recdata;
    9538             :         char       *recdata_end;
    9539             :         Size        datalen;
    9540             :         Size        tuplen;
    9541             : 
    9542      185216 :         recdata = XLogRecGetBlockData(record, 0, &datalen);
    9543      185216 :         recdata_end = recdata + datalen;
    9544             : 
    9545      185216 :         page = BufferGetPage(nbuffer);
    9546             : 
    9547      185216 :         offnum = xlrec->new_offnum;
    9548      185216 :         if (PageGetMaxOffsetNumber(page) + 1 < offnum)
    9549           0 :             elog(PANIC, "invalid max offset number");
    9550             : 
    9551      185216 :         if (xlrec->flags & XLH_UPDATE_PREFIX_FROM_OLD)
    9552             :         {
    9553             :             Assert(newblk == oldblk);
    9554       30170 :             memcpy(&prefixlen, recdata, sizeof(uint16));
    9555       30170 :             recdata += sizeof(uint16);
    9556             :         }
    9557      185216 :         if (xlrec->flags & XLH_UPDATE_SUFFIX_FROM_OLD)
    9558             :         {
    9559             :             Assert(newblk == oldblk);
    9560       67210 :             memcpy(&suffixlen, recdata, sizeof(uint16));
    9561       67210 :             recdata += sizeof(uint16);
    9562             :         }
    9563             : 
    9564      185216 :         memcpy((char *) &xlhdr, recdata, SizeOfHeapHeader);
    9565      185216 :         recdata += SizeOfHeapHeader;
    9566             : 
    9567      185216 :         tuplen = recdata_end - recdata;
    9568             :         Assert(tuplen <= MaxHeapTupleSize);
    9569             : 
    9570      185216 :         htup = &tbuf.hdr;
    9571      185216 :         MemSet((char *) htup, 0, SizeofHeapTupleHeader);
    9572             : 
    9573             :         /*
    9574             :          * Reconstruct the new tuple using the prefix and/or suffix from the
    9575             :          * old tuple, and the data stored in the WAL record.
    9576             :          */
    9577      185216 :         newp = (char *) htup + SizeofHeapTupleHeader;
    9578      185216 :         if (prefixlen > 0)
    9579             :         {
    9580             :             int         len;
    9581             : 
    9582             :             /* copy bitmap [+ padding] [+ oid] from WAL record */
    9583       30170 :             len = xlhdr.t_hoff - SizeofHeapTupleHeader;
    9584       30170 :             memcpy(newp, recdata, len);
    9585       30170 :             recdata += len;
    9586       30170 :             newp += len;
    9587             : 
    9588             :             /* copy prefix from old tuple */
    9589       30170 :             memcpy(newp, (char *) oldtup.t_data + oldtup.t_data->t_hoff, prefixlen);
    9590       30170 :             newp += prefixlen;
    9591             : 
    9592             :             /* copy new tuple data from WAL record */
    9593       30170 :             len = tuplen - (xlhdr.t_hoff - SizeofHeapTupleHeader);
    9594       30170 :             memcpy(newp, recdata, len);
    9595       30170 :             recdata += len;
    9596       30170 :             newp += len;
    9597             :         }
    9598             :         else
    9599             :         {
    9600             :             /*
    9601             :              * copy bitmap [+ padding] [+ oid] + data from record, all in one
    9602             :              * go
    9603             :              */
    9604      155046 :             memcpy(newp, recdata, tuplen);
    9605      155046 :             recdata += tuplen;
    9606      155046 :             newp += tuplen;
    9607             :         }
    9608             :         Assert(recdata == recdata_end);
    9609             : 
    9610             :         /* copy suffix from old tuple */
    9611      185216 :         if (suffixlen > 0)
    9612       67210 :             memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);
    9613             : 
    9614      185216 :         newlen = SizeofHeapTupleHeader + tuplen + prefixlen + suffixlen;
    9615      185216 :         htup->t_infomask2 = xlhdr.t_infomask2;
    9616      185216 :         htup->t_infomask = xlhdr.t_infomask;
    9617      185216 :         htup->t_hoff = xlhdr.t_hoff;
    9618             : 
    9619      185216 :         HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
    9620      185216 :         HeapTupleHeaderSetCmin(htup, FirstCommandId);
    9621      185216 :         HeapTupleHeaderSetXmax(htup, xlrec->new_xmax);
    9622             :         /* Make sure there is no forward chain link in t_ctid */
    9623      185216 :         htup->t_ctid = newtid;
    9624             : 
    9625      185216 :         offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
    9626      185216 :         if (offnum == InvalidOffsetNumber)
    9627           0 :             elog(PANIC, "failed to add tuple");
    9628             : 
    9629      185216 :         if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
    9630          38 :             PageClearAllVisible(page);
    9631             : 
    9632      185216 :         freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
    9633             : 
    9634      185216 :         PageSetLSN(page, lsn);
    9635      185216 :         MarkBufferDirty(nbuffer);
    9636             :     }
    9637             : 
    9638      185314 :     if (BufferIsValid(nbuffer) && nbuffer != obuffer)
    9639      107954 :         UnlockReleaseBuffer(nbuffer);
    9640      185314 :     if (BufferIsValid(obuffer))
    9641      185314 :         UnlockReleaseBuffer(obuffer);
    9642             : 
    9643             :     /*
    9644             :      * If the new page is running low on free space, update the FSM as well.
    9645             :      * Arbitrarily, our definition of "low" is less than 20%. We can't do much
    9646             :      * better than that without knowing the fill-factor for the table.
    9647             :      *
    9648             :      * However, don't update the FSM on HOT updates, because after crash
    9649             :      * recovery, either the old or the new tuple will certainly be dead and
    9650             :      * prunable. After pruning, the page will have roughly as much free space
    9651             :      * as it did before the update, assuming the new tuple is about the same
    9652             :      * size as the old one.
    9653             :      *
    9654             :      * XXX: Don't do this if the page was restored from full page image. We
    9655             :      * don't bother to update the FSM in that case; it doesn't need to be
    9656             :      * totally accurate anyway.
    9657             :      */
    9658      185314 :     if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
    9659       22702 :         XLogRecordPageWithFreeSpace(rlocator, newblk, freespace);
    9660      185314 : }
    9661             : 
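/*
 * An illustrative sketch, not part of heapam.c: heap_xlog_update() above
 * rebuilds the new tuple from three pieces, namely a prefix reused from the
 * old tuple, the changed middle bytes carried in the WAL record, and a
 * suffix reused from the old tuple.  The helper below shows that
 * concatenation on plain byte buffers; all names are hypothetical, and the
 * header and null-bitmap handling of the real code is intentionally left
 * out.  Reuse of the prefix and suffix is only possible when the old and
 * new versions live on the same page, which is why the real code asserts
 * newblk == oldblk in those branches.
 */
#include <assert.h>
#include <stddef.h>
#include <string.h>

static size_t
example_reconstruct_new_tuple(char *dst,
                              const char *olddata, size_t oldlen,
                              const char *waldata, size_t wallen,
                              size_t prefixlen, size_t suffixlen)
{
    char       *p = dst;

    /* the reused prefix and suffix must both fit inside the old tuple */
    assert(prefixlen + suffixlen <= oldlen);

    memcpy(p, olddata, prefixlen);                      /* shared prefix */
    p += prefixlen;
    memcpy(p, waldata, wallen);                         /* bytes from WAL */
    p += wallen;
    memcpy(p, olddata + oldlen - suffixlen, suffixlen); /* shared suffix */
    p += suffixlen;

    return (size_t) (p - dst);      /* == prefixlen + wallen + suffixlen */
}
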
    9662             : static void
    9663         122 : heap_xlog_confirm(XLogReaderState *record)
    9664             : {
    9665         122 :     XLogRecPtr  lsn = record->EndRecPtr;
    9666         122 :     xl_heap_confirm *xlrec = (xl_heap_confirm *) XLogRecGetData(record);
    9667             :     Buffer      buffer;
    9668             :     Page        page;
    9669             :     OffsetNumber offnum;
    9670         122 :     ItemId      lp = NULL;
    9671             :     HeapTupleHeader htup;
    9672             : 
    9673         122 :     if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    9674             :     {
    9675         122 :         page = BufferGetPage(buffer);
    9676             : 
    9677         122 :         offnum = xlrec->offnum;
    9678         122 :         if (PageGetMaxOffsetNumber(page) >= offnum)
    9679         122 :             lp = PageGetItemId(page, offnum);
    9680             : 
    9681         122 :         if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
    9682           0 :             elog(PANIC, "invalid lp");
    9683             : 
    9684         122 :         htup = (HeapTupleHeader) PageGetItem(page, lp);
    9685             : 
    9686             :         /*
    9687             :          * Confirm tuple as actually inserted
    9688             :          */
    9689         122 :         ItemPointerSet(&htup->t_ctid, BufferGetBlockNumber(buffer), offnum);
    9690             : 
    9691         122 :         PageSetLSN(page, lsn);
    9692         122 :         MarkBufferDirty(buffer);
    9693             :     }
    9694         122 :     if (BufferIsValid(buffer))
    9695         122 :         UnlockReleaseBuffer(buffer);
    9696         122 : }
    9697             : 
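/*
 * An illustrative sketch, not part of heapam.c: heap_xlog_confirm() above
 * is the smallest instance of the replay skeleton the redo routines in this
 * file share: read the buffer, apply the change only when redo is needed,
 * stamp the page LSN, dirty the buffer, and release it.  The function name
 * is hypothetical; the calls are the same ones used above, and the same
 * headers as this file are assumed.
 */
static void
example_redo_skeleton(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    Buffer      buffer;

    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        Page        page = BufferGetPage(buffer);

        /* ... apply this record's changes to "page" here ... */

        PageSetLSN(page, lsn);      /* mark the page as updated to this record */
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))      /* BLK_NOTFOUND leaves an invalid buffer */
        UnlockReleaseBuffer(buffer);
}
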
    9698             : static void
    9699      109678 : heap_xlog_lock(XLogReaderState *record)
    9700             : {
    9701      109678 :     XLogRecPtr  lsn = record->EndRecPtr;
    9702      109678 :     xl_heap_lock *xlrec = (xl_heap_lock *) XLogRecGetData(record);
    9703             :     Buffer      buffer;
    9704             :     Page        page;
    9705             :     OffsetNumber offnum;
    9706      109678 :     ItemId      lp = NULL;
    9707             :     HeapTupleHeader htup;
    9708             : 
    9709             :     /*
    9710             :      * The visibility map may need to be fixed even if the heap page is
    9711             :      * already up-to-date.
    9712             :      */
    9713      109678 :     if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
    9714             :     {
    9715             :         RelFileLocator rlocator;
    9716          22 :         Buffer      vmbuffer = InvalidBuffer;
    9717             :         BlockNumber block;
    9718             :         Relation    reln;
    9719             : 
    9720          22 :         XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
    9721          22 :         reln = CreateFakeRelcacheEntry(rlocator);
    9722             : 
    9723          22 :         visibilitymap_pin(reln, block, &vmbuffer);
    9724          22 :         visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
    9725             : 
    9726          22 :         ReleaseBuffer(vmbuffer);
    9727          22 :         FreeFakeRelcacheEntry(reln);
    9728             :     }
    9729             : 
    9730      109678 :     if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    9731             :     {
    9732      109564 :         page = (Page) BufferGetPage(buffer);
    9733             : 
    9734      109564 :         offnum = xlrec->offnum;
    9735      109564 :         if (PageGetMaxOffsetNumber(page) >= offnum)
    9736      109564 :             lp = PageGetItemId(page, offnum);
    9737             : 
    9738      109564 :         if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
    9739           0 :             elog(PANIC, "invalid lp");
    9740             : 
    9741      109564 :         htup = (HeapTupleHeader) PageGetItem(page, lp);
    9742             : 
    9743      109564 :         htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    9744      109564 :         htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    9745      109564 :         fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
    9746             :                                    &htup->t_infomask2);
    9747             : 
    9748             :         /*
    9749             :          * Clear relevant update flags, but only if the modified infomask says
    9750             :          * there's no update.
    9751             :          */
    9752      109564 :         if (HEAP_XMAX_IS_LOCKED_ONLY(htup->t_infomask))
    9753             :         {
    9754      109564 :             HeapTupleHeaderClearHotUpdated(htup);
    9755             :             /* Make sure there is no forward chain link in t_ctid */
    9756      109564 :             ItemPointerSet(&htup->t_ctid,
    9757             :                            BufferGetBlockNumber(buffer),
    9758             :                            offnum);
    9759             :         }
    9760      109564 :         HeapTupleHeaderSetXmax(htup, xlrec->xmax);
    9761      109564 :         HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
    9762      109564 :         PageSetLSN(page, lsn);
    9763      109564 :         MarkBufferDirty(buffer);
    9764             :     }
    9765      109678 :     if (BufferIsValid(buffer))
    9766      109678 :         UnlockReleaseBuffer(buffer);
    9767      109678 : }
    9768             : 
    9769             : static void
    9770           0 : heap_xlog_lock_updated(XLogReaderState *record)
    9771             : {
    9772           0 :     XLogRecPtr  lsn = record->EndRecPtr;
    9773             :     xl_heap_lock_updated *xlrec;
    9774             :     Buffer      buffer;
    9775             :     Page        page;
    9776             :     OffsetNumber offnum;
    9777           0 :     ItemId      lp = NULL;
    9778             :     HeapTupleHeader htup;
    9779             : 
    9780           0 :     xlrec = (xl_heap_lock_updated *) XLogRecGetData(record);
    9781             : 
    9782             :     /*
    9783             :      * The visibility map may need to be fixed even if the heap page is
    9784             :      * already up-to-date.
    9785             :      */
    9786           0 :     if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
    9787             :     {
    9788             :         RelFileLocator rlocator;
    9789           0 :         Buffer      vmbuffer = InvalidBuffer;
    9790             :         BlockNumber block;
    9791             :         Relation    reln;
    9792             : 
    9793           0 :         XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
    9794           0 :         reln = CreateFakeRelcacheEntry(rlocator);
    9795             : 
    9796           0 :         visibilitymap_pin(reln, block, &vmbuffer);
    9797           0 :         visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
    9798             : 
    9799           0 :         ReleaseBuffer(vmbuffer);
    9800           0 :         FreeFakeRelcacheEntry(reln);
    9801             :     }
    9802             : 
    9803           0 :     if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    9804             :     {
    9805           0 :         page = BufferGetPage(buffer);
    9806             : 
    9807           0 :         offnum = xlrec->offnum;
    9808           0 :         if (PageGetMaxOffsetNumber(page) >= offnum)
    9809           0 :             lp = PageGetItemId(page, offnum);
    9810             : 
    9811           0 :         if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
    9812           0 :             elog(PANIC, "invalid lp");
    9813             : 
    9814           0 :         htup = (HeapTupleHeader) PageGetItem(page, lp);
    9815             : 
    9816           0 :         htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    9817           0 :         htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    9818           0 :         fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
    9819             :                                    &htup->t_infomask2);
    9820           0 :         HeapTupleHeaderSetXmax(htup, xlrec->xmax);
    9821             : 
    9822           0 :         PageSetLSN(page, lsn);
    9823           0 :         MarkBufferDirty(buffer);
    9824             :     }
    9825           0 :     if (BufferIsValid(buffer))
    9826           0 :         UnlockReleaseBuffer(buffer);
    9827           0 : }
    9828             : 
    9829             : static void
    9830       13776 : heap_xlog_inplace(XLogReaderState *record)
    9831             : {
    9832       13776 :     XLogRecPtr  lsn = record->EndRecPtr;
    9833       13776 :     xl_heap_inplace *xlrec = (xl_heap_inplace *) XLogRecGetData(record);
    9834             :     Buffer      buffer;
    9835             :     Page        page;
    9836             :     OffsetNumber offnum;
    9837       13776 :     ItemId      lp = NULL;
    9838             :     HeapTupleHeader htup;
    9839             :     uint32      oldlen;
    9840             :     Size        newlen;
    9841             : 
    9842       13776 :     if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    9843             :     {
    9844       13704 :         char       *newtup = XLogRecGetBlockData(record, 0, &newlen);
    9845             : 
    9846       13704 :         page = BufferGetPage(buffer);
    9847             : 
    9848       13704 :         offnum = xlrec->offnum;
    9849       13704 :         if (PageGetMaxOffsetNumber(page) >= offnum)
    9850       13704 :             lp = PageGetItemId(page, offnum);
    9851             : 
    9852       13704 :         if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
    9853           0 :             elog(PANIC, "invalid lp");
    9854             : 
    9855       13704 :         htup = (HeapTupleHeader) PageGetItem(page, lp);
    9856             : 
    9857       13704 :         oldlen = ItemIdGetLength(lp) - htup->t_hoff;
    9858       13704 :         if (oldlen != newlen)
    9859           0 :             elog(PANIC, "wrong tuple length");
    9860             : 
    9861       13704 :         memcpy((char *) htup + htup->t_hoff, newtup, newlen);
    9862             : 
    9863       13704 :         PageSetLSN(page, lsn);
    9864       13704 :         MarkBufferDirty(buffer);
    9865             :     }
    9866       13776 :     if (BufferIsValid(buffer))
    9867       13776 :         UnlockReleaseBuffer(buffer);
    9868       13776 : }
    9869             : 
    9870             : void
    9871     3400072 : heap_redo(XLogReaderState *record)
    9872             : {
    9873     3400072 :     uint8       info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
    9874             : 
    9875             :     /*
    9876             :      * These operations don't overwrite MVCC data, so no conflict processing
    9877             :      * is required. The ones in the heap2 rmgr do.
    9878             :      */
    9879             : 
    9880     3400072 :     switch (info & XLOG_HEAP_OPMASK)
    9881             :     {
    9882     2510420 :         case XLOG_HEAP_INSERT:
    9883     2510420 :             heap_xlog_insert(record);
    9884     2510420 :             break;
    9885      580758 :         case XLOG_HEAP_DELETE:
    9886      580758 :             heap_xlog_delete(record);
    9887      580758 :             break;
    9888      113074 :         case XLOG_HEAP_UPDATE:
    9889      113074 :             heap_xlog_update(record, false);
    9890      113074 :             break;
    9891           4 :         case XLOG_HEAP_TRUNCATE:
    9892             : 
    9893             :             /*
    9894             :              * TRUNCATE is a no-op because the actions are already logged as
    9895             :              * SMGR WAL records.  The TRUNCATE WAL record only exists for
    9896             :              * logical decoding.
    9897             :              */
    9898           4 :             break;
    9899       72240 :         case XLOG_HEAP_HOT_UPDATE:
    9900       72240 :             heap_xlog_update(record, true);
    9901       72240 :             break;
    9902         122 :         case XLOG_HEAP_CONFIRM:
    9903         122 :             heap_xlog_confirm(record);
    9904         122 :             break;
    9905      109678 :         case XLOG_HEAP_LOCK:
    9906      109678 :             heap_xlog_lock(record);
    9907      109678 :             break;
    9908       13776 :         case XLOG_HEAP_INPLACE:
    9909       13776 :             heap_xlog_inplace(record);
    9910       13776 :             break;
    9911           0 :         default:
    9912           0 :             elog(PANIC, "heap_redo: unknown op code %u", info);
    9913             :     }
    9914     3400072 : }
    9915             : 
    9916             : void
    9917      130658 : heap2_redo(XLogReaderState *record)
    9918             : {
    9919      130658 :     uint8       info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
    9920             : 
    9921      130658 :     switch (info & XLOG_HEAP_OPMASK)
    9922             :     {
    9923       16326 :         case XLOG_HEAP2_PRUNE_ON_ACCESS:
    9924             :         case XLOG_HEAP2_PRUNE_VACUUM_SCAN:
    9925             :         case XLOG_HEAP2_PRUNE_VACUUM_CLEANUP:
    9926       16326 :             heap_xlog_prune_freeze(record);
    9927       16326 :             break;
    9928        7366 :         case XLOG_HEAP2_VISIBLE:
    9929        7366 :             heap_xlog_visible(record);
    9930        7366 :             break;
    9931      105266 :         case XLOG_HEAP2_MULTI_INSERT:
    9932      105266 :             heap_xlog_multi_insert(record);
    9933      105266 :             break;
    9934           0 :         case XLOG_HEAP2_LOCK_UPDATED:
    9935           0 :             heap_xlog_lock_updated(record);
    9936           0 :             break;
    9937        1700 :         case XLOG_HEAP2_NEW_CID:
    9938             : 
    9939             :             /*
    9940             :              * Nothing to do on a real replay, only used during logical
    9941             :              * decoding.
    9942             :              */
    9943        1700 :             break;
    9944           0 :         case XLOG_HEAP2_REWRITE:
    9945           0 :             heap_xlog_logical_rewrite(record);
    9946           0 :             break;
    9947           0 :         default:
    9948           0 :             elog(PANIC, "heap2_redo: unknown op code %u", info);
    9949             :     }
    9950      130658 : }
    9951             : 
    9952             : /*
    9953             :  * Mask a heap page before performing consistency checks on it.
    9954             :  */
    9955             : void
    9956           0 : heap_mask(char *pagedata, BlockNumber blkno)
    9957             : {
    9958           0 :     Page        page = (Page) pagedata;
    9959             :     OffsetNumber off;
    9960             : 
    9961           0 :     mask_page_lsn_and_checksum(page);
    9962             : 
    9963           0 :     mask_page_hint_bits(page);
    9964           0 :     mask_unused_space(page);
    9965             : 
    9966           0 :     for (off = 1; off <= PageGetMaxOffsetNumber(page); off++)
    9967             :     {
    9968           0 :         ItemId      iid = PageGetItemId(page, off);
    9969             :         char       *page_item;
    9970             : 
    9971           0 :         page_item = (char *) (page + ItemIdGetOffset(iid));
    9972             : 
    9973           0 :         if (ItemIdIsNormal(iid))
    9974             :         {
    9975           0 :             HeapTupleHeader page_htup = (HeapTupleHeader) page_item;
    9976             : 
    9977             :             /*
    9978             :              * If xmin of a tuple is not yet frozen, we should ignore
    9979             :              * differences in hint bits, since they can be set without
    9980             :              * emitting WAL.
    9981             :              */
    9982           0 :             if (!HeapTupleHeaderXminFrozen(page_htup))
    9983           0 :                 page_htup->t_infomask &= ~HEAP_XACT_MASK;
    9984             :             else
    9985             :             {
    9986             :                 /* Still we need to mask xmax hint bits. */
    9987           0 :                 page_htup->t_infomask &= ~HEAP_XMAX_INVALID;
    9988           0 :                 page_htup->t_infomask &= ~HEAP_XMAX_COMMITTED;
    9989             :             }
    9990             : 
    9991             :             /*
    9992             :              * During replay, we set Command Id to FirstCommandId. Hence, mask
    9993             :              * it. See heap_xlog_insert() for details.
    9994             :              */
    9995           0 :             page_htup->t_choice.t_heap.t_field3.t_cid = MASK_MARKER;
    9996             : 
    9997             :             /*
    9998             :              * For a speculative tuple, heap_insert() does not set ctid in the
    9999             :              * caller-passed heap tuple itself, leaving the ctid field to
   10000             :              * contain a speculative token value - a per-backend monotonically
   10001             :              * increasing identifier. Besides, it does not WAL-log ctid under
   10002             :              * any circumstances.
   10003             :              *
   10004             :              * During redo, heap_xlog_insert() sets t_ctid to current block
   10005             :              * number and self offset number. It doesn't care about any
   10006             :              * speculative insertions on the primary. Hence, we set t_ctid to
   10007             :              * current block number and self offset number to ignore any
   10008             :              * inconsistency.
   10009             :              */
   10010           0 :             if (HeapTupleHeaderIsSpeculative(page_htup))
   10011           0 :                 ItemPointerSet(&page_htup->t_ctid, blkno, off);
   10012             : 
   10013             :             /*
   10014             :              * NB: Not ignoring ctid changes due to the tuple having moved
   10015             :              * (i.e. HeapTupleHeaderIndicatesMovedPartitions), because that's
   10016             :              * important information that needs to be in-sync between primary
   10017             :              * and standby, and thus is WAL logged.
   10018             :              */
   10019             :         }
   10020             : 
   10021             :         /*
   10022             :          * Ignore any padding bytes after the tuple, when the length of the
   10023             :          * item is not MAXALIGNed.
   10024             :          */
   10025           0 :         if (ItemIdHasStorage(iid))
   10026             :         {
   10027           0 :             int         len = ItemIdGetLength(iid);
   10028           0 :             int         padlen = MAXALIGN(len) - len;
   10029             : 
   10030           0 :             if (padlen > 0)
   10031           0 :                 memset(page_item + len, MASK_MARKER, padlen);
   10032             :         }
   10033             :     }
   10034           0 : }
   10035             : 
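/*
 * An illustrative sketch, not part of heapam.c: the padding-masking step at
 * the end of heap_mask() relies on MAXALIGN() rounding a length up to the
 * platform's maximum alignment, so the bytes between the item's real length
 * and the rounded-up length are uninitialized filler that must be forced to
 * a fixed value before pages are compared.  The macros and the marker value
 * below are hypothetical stand-ins (8-byte alignment assumed); the real
 * definitions live in the PostgreSQL headers.
 */
#include <stddef.h>
#include <string.h>

#define EXAMPLE_MAXIMUM_ALIGNOF 8
#define EXAMPLE_MAXALIGN(len) \
    (((size_t) (len) + EXAMPLE_MAXIMUM_ALIGNOF - 1) & \
     ~(size_t) (EXAMPLE_MAXIMUM_ALIGNOF - 1))
#define EXAMPLE_MASK_MARKER 0       /* hypothetical filler byte */

static void
example_mask_item_padding(char *item, size_t len)
{
    size_t      padlen = EXAMPLE_MAXALIGN(len) - len;

    /* e.g. len = 61 rounds up to 64, so the last 3 bytes are padding */
    if (padlen > 0)
        memset(item + len, EXAMPLE_MASK_MARKER, padlen);
}
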
   10036             : /*
   10037             :  * HeapCheckForSerializableConflictOut
   10038             :  *      We are reading a tuple.  If it's not visible, there may be a
   10039             :  *      rw-conflict out with the inserter.  Otherwise, if it is visible to us
   10040             :  *      but has been deleted, there may be a rw-conflict out with the deleter.
   10041             :  *
   10042             :  * We will determine the top level xid of the writing transaction with which
   10043             :  * we may be in conflict, and ask CheckForSerializableConflictOut() to check
   10044             :  * for overlap with our own transaction.
   10045             :  *
   10046             :  * This function should be called just about anywhere in heapam.c where a
   10047             :  * tuple has been read. The caller must hold at least a shared lock on the
   10048             :  * buffer, because this function might set hint bits on the tuple. There is
   10049             :  * currently no known reason to call this function from an index AM.
   10050             :  */
   10051             : void
   10052    55140584 : HeapCheckForSerializableConflictOut(bool visible, Relation relation,
   10053             :                                     HeapTuple tuple, Buffer buffer,
   10054             :                                     Snapshot snapshot)
   10055             : {
   10056             :     TransactionId xid;
   10057             :     HTSV_Result htsvResult;
   10058             : 
   10059    55140584 :     if (!CheckForSerializableConflictOutNeeded(relation, snapshot))
   10060    55089890 :         return;
   10061             : 
   10062             :     /*
   10063             :      * Check to see whether the tuple has been written to by a concurrent
   10064             :      * transaction, either to create it not visible to us, or to delete it
   10065             :      * while it is visible to us.  The "visible" bool indicates whether the
   10066             :      * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
   10067             :      * is going on with it.
   10068             :      *
   10069             :      * In the event of a concurrently inserted tuple that also happens to have
   10070             :      * been concurrently updated (by a separate transaction), the xmin of the
   10071             :      * tuple will be used -- not the updater's xid.
   10072             :      */
   10073       50694 :     htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
   10074       50694 :     switch (htsvResult)
   10075             :     {
   10076       49090 :         case HEAPTUPLE_LIVE:
   10077       49090 :             if (visible)
   10078       49064 :                 return;
   10079          26 :             xid = HeapTupleHeaderGetXmin(tuple->t_data);
   10080          26 :             break;
   10081         704 :         case HEAPTUPLE_RECENTLY_DEAD:
   10082             :         case HEAPTUPLE_DELETE_IN_PROGRESS:
   10083         704 :             if (visible)
   10084         562 :                 xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
   10085             :             else
   10086         142 :                 xid = HeapTupleHeaderGetXmin(tuple->t_data);
   10087             : 
   10088         704 :             if (TransactionIdPrecedes(xid, TransactionXmin))
   10089             :             {
   10090             :                 /* This is like the HEAPTUPLE_DEAD case */
   10091             :                 Assert(!visible);
   10092         126 :                 return;
   10093             :             }
   10094         578 :             break;
   10095         652 :         case HEAPTUPLE_INSERT_IN_PROGRESS:
   10096         652 :             xid = HeapTupleHeaderGetXmin(tuple->t_data);
   10097         652 :             break;
   10098         248 :         case HEAPTUPLE_DEAD:
   10099             :             Assert(!visible);
   10100         248 :             return;
   10101           0 :         default:
   10102             : 
   10103             :             /*
   10104             :              * The only way to get to this default clause is if a new value is
   10105             :              * added to the enum type without adding it to this switch
   10106             :              * statement.  That's a bug, so elog.
   10107             :              */
   10108           0 :             elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
   10109             : 
   10110             :             /*
   10111             :              * In spite of having all enum values covered and calling elog on
   10112             :              * this default, some compilers think this is a code path which
   10113             :              * allows xid to be used below without initialization. Silence
   10114             :              * that warning.
   10115             :              */
   10116             :             xid = InvalidTransactionId;
   10117             :     }
   10118             : 
   10119             :     Assert(TransactionIdIsValid(xid));
   10120             :     Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
   10121             : 
   10122             :     /*
   10123             :      * Find top level xid.  Bail out if xid is too early to be a conflict, or
   10124             :      * if it's our own xid.
   10125             :      */
   10126        1256 :     if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
   10127         124 :         return;
   10128        1132 :     xid = SubTransGetTopmostTransaction(xid);
   10129        1132 :     if (TransactionIdPrecedes(xid, TransactionXmin))
   10130           0 :         return;
   10131             : 
   10132        1132 :     CheckForSerializableConflictOut(relation, xid, snapshot);
   10133             : }
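
/*
 * An illustrative usage sketch, not part of heapam.c: per the comment above,
 * HeapCheckForSerializableConflictOut() is meant to be called wherever a
 * heap tuple has been read, while at least a share lock is still held on the
 * buffer, passing in the result of the snapshot visibility test.  The caller
 * below is hypothetical and assumes the same headers as this file.
 */
static bool
example_fetch_visible_tuple(Relation relation, Snapshot snapshot,
                            HeapTuple tuple, Buffer buffer)
{
    bool        valid;

    /* decide visibility under the scan snapshot first */
    valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);

    /* then report the outcome so a rw-conflict out can be detected */
    HeapCheckForSerializableConflictOut(valid, relation, tuple,
                                        buffer, snapshot);

    return valid;
}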

Generated by: LCOV version 1.14