LCOV - code coverage report
Current view: top level - src/backend/access/heap - heapam.c
Test:         PostgreSQL 13beta1
Date:         2020-06-01 00:06:26

                     Hit     Total    Coverage
Lines:               2456    2917     84.2 %
Functions:           74      79       93.7 %

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * heapam.c
       4             :  *    heap access method code
       5             :  *
       6             :  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/access/heap/heapam.c
      12             :  *
      13             :  *
      14             :  * INTERFACE ROUTINES
      15             :  *      heap_beginscan  - begin relation scan
      16             :  *      heap_rescan     - restart a relation scan
      17             :  *      heap_endscan    - end relation scan
      18             :  *      heap_getnext    - retrieve next tuple in scan
      19             :  *      heap_fetch      - retrieve tuple with given tid
      20             :  *      heap_insert     - insert tuple into a relation
      21             :  *      heap_multi_insert - insert multiple tuples into a relation
      22             :  *      heap_delete     - delete a tuple from a relation
      23             :  *      heap_update     - replace a tuple in a relation with another tuple
      24             :  *
      25             :  * NOTES
      26             :  *    This file contains the heap_ routines which implement
      27             :  *    the POSTGRES heap access method used for all POSTGRES
      28             :  *    relations.
      29             :  *
      30             :  *-------------------------------------------------------------------------
      31             :  */
      32             : #include "postgres.h"
      33             : 
      34             : #include "access/bufmask.h"
      35             : #include "access/genam.h"
      36             : #include "access/heapam.h"
      37             : #include "access/heapam_xlog.h"
      38             : #include "access/heaptoast.h"
      39             : #include "access/hio.h"
      40             : #include "access/multixact.h"
      41             : #include "access/parallel.h"
      42             : #include "access/relscan.h"
      43             : #include "access/subtrans.h"
      44             : #include "access/sysattr.h"
      45             : #include "access/tableam.h"
      46             : #include "access/transam.h"
      47             : #include "access/valid.h"
      48             : #include "access/visibilitymap.h"
      49             : #include "access/xact.h"
      50             : #include "access/xlog.h"
      51             : #include "access/xloginsert.h"
      52             : #include "access/xlogutils.h"
      53             : #include "catalog/catalog.h"
      54             : #include "miscadmin.h"
      55             : #include "pgstat.h"
      56             : #include "port/atomics.h"
      57             : #include "storage/bufmgr.h"
      58             : #include "storage/freespace.h"
      59             : #include "storage/lmgr.h"
      60             : #include "storage/predicate.h"
      61             : #include "storage/procarray.h"
      62             : #include "storage/smgr.h"
      63             : #include "storage/spin.h"
      64             : #include "storage/standby.h"
      65             : #include "utils/datum.h"
      66             : #include "utils/inval.h"
      67             : #include "utils/lsyscache.h"
      68             : #include "utils/relcache.h"
      69             : #include "utils/snapmgr.h"
      70             : #include "utils/spccache.h"
      71             : 
      72             : 
      73             : static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
      74             :                                      TransactionId xid, CommandId cid, int options);
      75             : static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
      76             :                                   Buffer newbuf, HeapTuple oldtup,
      77             :                                   HeapTuple newtup, HeapTuple old_key_tuple,
      78             :                                   bool all_visible_cleared, bool new_all_visible_cleared);
      79             : static Bitmapset *HeapDetermineModifiedColumns(Relation relation,
      80             :                                                Bitmapset *interesting_cols,
      81             :                                                HeapTuple oldtup, HeapTuple newtup);
      82             : static bool heap_acquire_tuplock(Relation relation, ItemPointer tid,
      83             :                                  LockTupleMode mode, LockWaitPolicy wait_policy,
      84             :                                  bool *have_tuple_lock);
      85             : static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
      86             :                                       uint16 old_infomask2, TransactionId add_to_xmax,
      87             :                                       LockTupleMode mode, bool is_update,
      88             :                                       TransactionId *result_xmax, uint16 *result_infomask,
      89             :                                       uint16 *result_infomask2);
      90             : static TM_Result heap_lock_updated_tuple(Relation rel, HeapTuple tuple,
      91             :                                          ItemPointer ctid, TransactionId xid,
      92             :                                          LockTupleMode mode);
      93             : static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
      94             :                                    uint16 *new_infomask2);
      95             : static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
      96             :                                              uint16 t_infomask);
      97             : static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
      98             :                                     LockTupleMode lockmode, bool *current_is_member);
      99             : static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
     100             :                             Relation rel, ItemPointer ctid, XLTW_Oper oper,
     101             :                             int *remaining);
     102             : static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
     103             :                                        uint16 infomask, Relation rel, int *remaining);
     104             : static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
     105             : static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_changed,
     106             :                                         bool *copy);
     107             : 
     108             : 
     109             : /*
     110             :  * Each tuple lock mode has a corresponding heavyweight lock, and one or two
     111             :  * corresponding MultiXactStatuses (one to merely lock tuples, another one to
     112             :  * update them).  This table (and the macros below) helps us determine the
     113             :  * heavyweight lock mode and MultiXactStatus values to use for any particular
     114             :  * tuple lock strength.
     115             :  *
     116             :  * Don't look at lockstatus/updstatus directly!  Use get_mxact_status_for_lock
     117             :  * instead.
     118             :  */
     119             : static const struct
     120             : {
     121             :     LOCKMODE    hwlock;
     122             :     int         lockstatus;
     123             :     int         updstatus;
     124             : }
     125             : 
     126             :             tupleLockExtraInfo[MaxLockTupleMode + 1] =
     127             : {
     128             :     {                           /* LockTupleKeyShare */
     129             :         AccessShareLock,
     130             :         MultiXactStatusForKeyShare,
     131             :         -1                      /* KeyShare does not allow updating tuples */
     132             :     },
     133             :     {                           /* LockTupleShare */
     134             :         RowShareLock,
     135             :         MultiXactStatusForShare,
     136             :         -1                      /* Share does not allow updating tuples */
     137             :     },
     138             :     {                           /* LockTupleNoKeyExclusive */
     139             :         ExclusiveLock,
     140             :         MultiXactStatusForNoKeyUpdate,
     141             :         MultiXactStatusNoKeyUpdate
     142             :     },
     143             :     {                           /* LockTupleExclusive */
     144             :         AccessExclusiveLock,
     145             :         MultiXactStatusForUpdate,
     146             :         MultiXactStatusUpdate
     147             :     }
     148             : };
     149             : 
     150             : /* Get the LOCKMODE for a given MultiXactStatus */
     151             : #define LOCKMODE_from_mxstatus(status) \
     152             :             (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
     153             : 
     154             : /*
     155             :  * Acquire heavyweight locks on tuples, using a LockTupleMode strength value.
     156             :  * This is more readable than having every caller translate it to lock.h's
     157             :  * LOCKMODE.
     158             :  */
     159             : #define LockTupleTuplock(rel, tup, mode) \
     160             :     LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
     161             : #define UnlockTupleTuplock(rel, tup, mode) \
     162             :     UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
     163             : #define ConditionalLockTupleTuplock(rel, tup, mode) \
     164             :     ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
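A rough caller-side sketch of how the table and macros above are intended to be used (illustrative only, not code from heapam.c; "relation" and "tuple" are hypothetical variables a real caller would already have in hand):

    /* Take the heavyweight tuple lock matching a given LockTupleMode. */
    LockTupleMode mode = LockTupleNoKeyExclusive;

    LockTupleTuplock(relation, &(tuple->t_self), mode);   /* maps to ExclusiveLock */
    /* ... inspect or modify the tuple while the tuple lock is held ... */
    UnlockTupleTuplock(relation, &(tuple->t_self), mode);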
     165             : 
     166             : #ifdef USE_PREFETCH
     167             : /*
     168             :  * heap_compute_xid_horizon_for_tuples and xid_horizon_prefetch_buffer use
     169             :  * this structure to coordinate prefetching activity.
     170             :  */
     171             : typedef struct
     172             : {
     173             :     BlockNumber cur_hblkno;
     174             :     int         next_item;
     175             :     int         nitems;
     176             :     ItemPointerData *tids;
     177             : } XidHorizonPrefetchState;
     178             : #endif
     179             : 
     180             : /*
      181             :  * This table maps each MultiXactStatus value to the corresponding tuple
      182             :  * lock strength value.
     183             :  */
     184             : static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
     185             : {
     186             :     LockTupleKeyShare,          /* ForKeyShare */
     187             :     LockTupleShare,             /* ForShare */
     188             :     LockTupleNoKeyExclusive,    /* ForNoKeyUpdate */
     189             :     LockTupleExclusive,         /* ForUpdate */
     190             :     LockTupleNoKeyExclusive,    /* NoKeyUpdate */
     191             :     LockTupleExclusive          /* Update */
     192             : };
     193             : 
     194             : /* Get the LockTupleMode for a given MultiXactStatus */
     195             : #define TUPLOCK_from_mxstatus(status) \
     196             :             (MultiXactStatusLock[(status)])
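Reading the two tables together: LOCKMODE_from_mxstatus() first maps a MultiXactStatus back to a LockTupleMode via MultiXactStatusLock[], then looks up the heavyweight lock in tupleLockExtraInfo[]. A hedged expansion of one case, for illustration only:

    /* LOCKMODE_from_mxstatus(MultiXactStatusForShare) expands, in effect, to: */
    LOCKMODE hwlock =
        tupleLockExtraInfo[MultiXactStatusLock[MultiXactStatusForShare]].hwlock;
    /* MultiXactStatusForShare -> LockTupleShare -> RowShareLock */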
     197             : 
     198             : /* ----------------------------------------------------------------
     199             :  *                       heap support routines
     200             :  * ----------------------------------------------------------------
     201             :  */
     202             : 
     203             : /* ----------------
     204             :  *      initscan - scan code common to heap_beginscan and heap_rescan
     205             :  * ----------------
     206             :  */
     207             : static void
     208     1814490 : initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
     209             : {
     210     1814490 :     ParallelBlockTableScanDesc bpscan = NULL;
     211             :     bool        allow_strat;
     212             :     bool        allow_sync;
     213             : 
     214             :     /*
     215             :      * Determine the number of blocks we have to scan.
     216             :      *
     217             :      * It is sufficient to do this once at scan start, since any tuples added
     218             :      * while the scan is in progress will be invisible to my snapshot anyway.
     219             :      * (That is not true when using a non-MVCC snapshot.  However, we couldn't
     220             :      * guarantee to return tuples added after scan start anyway, since they
     221             :      * might go into pages we already scanned.  To guarantee consistent
     222             :      * results for a non-MVCC snapshot, the caller must hold some higher-level
     223             :      * lock that ensures the interesting tuple(s) won't change.)
     224             :      */
     225     1814490 :     if (scan->rs_base.rs_parallel != NULL)
     226             :     {
     227        2242 :         bpscan = (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
     228        2242 :         scan->rs_nblocks = bpscan->phs_nblocks;
     229             :     }
     230             :     else
     231     1812248 :         scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_base.rs_rd);
     232             : 
     233             :     /*
     234             :      * If the table is large relative to NBuffers, use a bulk-read access
     235             :      * strategy and enable synchronized scanning (see syncscan.c).  Although
     236             :      * the thresholds for these features could be different, we make them the
     237             :      * same so that there are only two behaviors to tune rather than four.
     238             :      * (However, some callers need to be able to disable one or both of these
     239             :      * behaviors, independently of the size of the table; also there is a GUC
     240             :      * variable that can disable synchronized scanning.)
     241             :      *
     242             :      * Note that table_block_parallelscan_initialize has a very similar test;
     243             :      * if you change this, consider changing that one, too.
     244             :      */
     245     1814490 :     if (!RelationUsesLocalBuffers(scan->rs_base.rs_rd) &&
     246     1808382 :         scan->rs_nblocks > NBuffers / 4)
     247             :     {
     248        3500 :         allow_strat = (scan->rs_base.rs_flags & SO_ALLOW_STRAT) != 0;
     249        3500 :         allow_sync = (scan->rs_base.rs_flags & SO_ALLOW_SYNC) != 0;
     250             :     }
     251             :     else
     252     1810990 :         allow_strat = allow_sync = false;
     253             : 
     254     1814490 :     if (allow_strat)
     255             :     {
     256             :         /* During a rescan, keep the previous strategy object. */
     257        3500 :         if (scan->rs_strategy == NULL)
     258        3500 :             scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
     259             :     }
     260             :     else
     261             :     {
     262     1810990 :         if (scan->rs_strategy != NULL)
     263           0 :             FreeAccessStrategy(scan->rs_strategy);
     264     1810990 :         scan->rs_strategy = NULL;
     265             :     }
     266             : 
     267     1814490 :     if (scan->rs_base.rs_parallel != NULL)
     268             :     {
     269             :         /* For parallel scan, believe whatever ParallelTableScanDesc says. */
     270        2242 :         if (scan->rs_base.rs_parallel->phs_syncscan)
     271           4 :             scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
     272             :         else
     273        2238 :             scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
     274             :     }
     275     1812248 :     else if (keep_startblock)
     276             :     {
     277             :         /*
     278             :          * When rescanning, we want to keep the previous startblock setting,
     279             :          * so that rewinding a cursor doesn't generate surprising results.
     280             :          * Reset the active syncscan setting, though.
     281             :          */
     282      861390 :         if (allow_sync && synchronize_seqscans)
     283           0 :             scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
     284             :         else
     285      861390 :             scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
     286             :     }
     287      950858 :     else if (allow_sync && synchronize_seqscans)
     288             :     {
     289          20 :         scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
     290          20 :         scan->rs_startblock = ss_get_location(scan->rs_base.rs_rd, scan->rs_nblocks);
     291             :     }
     292             :     else
     293             :     {
     294      950838 :         scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
     295      950838 :         scan->rs_startblock = 0;
     296             :     }
     297             : 
     298     1814490 :     scan->rs_numblocks = InvalidBlockNumber;
     299     1814490 :     scan->rs_inited = false;
     300     1814490 :     scan->rs_ctup.t_data = NULL;
     301     1814490 :     ItemPointerSetInvalid(&scan->rs_ctup.t_self);
     302     1814490 :     scan->rs_cbuf = InvalidBuffer;
     303     1814490 :     scan->rs_cblock = InvalidBlockNumber;
     304             : 
     305             :     /* page-at-a-time fields are always invalid when not rs_inited */
     306             : 
     307             :     /*
     308             :      * copy the scan key, if appropriate
     309             :      */
     310     1814490 :     if (key != NULL)
     311      683620 :         memcpy(scan->rs_base.rs_key, key, scan->rs_base.rs_nkeys * sizeof(ScanKeyData));
     312             : 
     313             :     /*
     314             :      * Currently, we only have a stats counter for sequential heap scans (but
      315             :      * e.g. for bitmap scans the underlying bitmap index scans will be counted,
     316             :      * and for sample scans we update stats for tuple fetches).
     317             :      */
     318     1814490 :     if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN)
     319     1754496 :         pgstat_count_heap_scan(scan->rs_base.rs_rd);
     320     1814490 : }
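initscan() is reached only through heap_beginscan() and heap_rescan(). As a minimal, hedged caller-side sketch of the interface routines listed in the file header ("rel" stands for a hypothetical relation that is already opened and locked; error handling omitted):

    TableScanDesc scan;
    HeapTuple     tuple;

    scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL, NULL,
                          SO_TYPE_SEQSCAN | SO_ALLOW_STRAT |
                          SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE);
    while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
    {
        /* tuple points at scan->rs_ctup; use it before the next fetch */
    }
    heap_endscan(scan);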
     321             : 
     322             : /*
     323             :  * heap_setscanlimits - restrict range of a heapscan
     324             :  *
     325             :  * startBlk is the page to start at
      326             :  * numBlks is the number of pages to scan (InvalidBlockNumber means "all")
     327             :  */
     328             : void
     329         292 : heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
     330             : {
     331         292 :     HeapScanDesc scan = (HeapScanDesc) sscan;
     332             : 
     333             :     Assert(!scan->rs_inited);    /* else too late to change */
     334             :     /* else rs_startblock is significant */
     335             :     Assert(!(scan->rs_base.rs_flags & SO_ALLOW_SYNC));
     336             : 
     337             :     /* Check startBlk is valid (but allow case of zero blocks...) */
     338             :     Assert(startBlk == 0 || startBlk < scan->rs_nblocks);
     339             : 
     340         292 :     scan->rs_startblock = startBlk;
     341         292 :     scan->rs_numblocks = numBlks;
     342         292 : }
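A hedged usage sketch for the limits above: they must be installed after heap_beginscan() but before the first tuple is fetched, and only on a scan opened without SO_ALLOW_SYNC (otherwise the Asserts fire). Continuing with the hypothetical "rel" from the previous sketch:

    TableScanDesc scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL, NULL,
                                        SO_TYPE_SEQSCAN | SO_ALLOW_STRAT |
                                        SO_ALLOW_PAGEMODE);  /* no SO_ALLOW_SYNC */

    heap_setscanlimits(scan, 0, 4);   /* scan only blocks 0..3 */
    /* ... heap_getnext() loop as above, then heap_endscan(scan) ... */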
     343             : 
     344             : /*
     345             :  * heapgetpage - subroutine for heapgettup()
     346             :  *
     347             :  * This routine reads and pins the specified page of the relation.
     348             :  * In page-at-a-time mode it performs additional work, namely determining
     349             :  * which tuples on the page are visible.
     350             :  */
     351             : void
     352     5118926 : heapgetpage(TableScanDesc sscan, BlockNumber page)
     353             : {
     354     5118926 :     HeapScanDesc scan = (HeapScanDesc) sscan;
     355             :     Buffer      buffer;
     356             :     Snapshot    snapshot;
     357             :     Page        dp;
     358             :     int         lines;
     359             :     int         ntup;
     360             :     OffsetNumber lineoff;
     361             :     ItemId      lpp;
     362             :     bool        all_visible;
     363             : 
     364             :     Assert(page < scan->rs_nblocks);
     365             : 
     366             :     /* release previous scan buffer, if any */
     367     5118926 :     if (BufferIsValid(scan->rs_cbuf))
     368             :     {
     369     3734308 :         ReleaseBuffer(scan->rs_cbuf);
     370     3734308 :         scan->rs_cbuf = InvalidBuffer;
     371             :     }
     372             : 
     373             :     /*
     374             :      * Be sure to check for interrupts at least once per page.  Checks at
     375             :      * higher code levels won't be able to stop a seqscan that encounters many
     376             :      * pages' worth of consecutive dead tuples.
     377             :      */
     378     5118926 :     CHECK_FOR_INTERRUPTS();
     379             : 
     380             :     /* read page using selected strategy */
     381     5118926 :     scan->rs_cbuf = ReadBufferExtended(scan->rs_base.rs_rd, MAIN_FORKNUM, page,
     382             :                                        RBM_NORMAL, scan->rs_strategy);
     383     5118926 :     scan->rs_cblock = page;
     384             : 
     385     5118926 :     if (!(scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE))
     386      145164 :         return;
     387             : 
     388     4973762 :     buffer = scan->rs_cbuf;
     389     4973762 :     snapshot = scan->rs_base.rs_snapshot;
     390             : 
     391             :     /*
     392             :      * Prune and repair fragmentation for the whole page, if possible.
     393             :      */
     394     4973762 :     heap_page_prune_opt(scan->rs_base.rs_rd, buffer);
     395             : 
     396             :     /*
     397             :      * We must hold share lock on the buffer content while examining tuple
     398             :      * visibility.  Afterwards, however, the tuples we have found to be
     399             :      * visible are guaranteed good as long as we hold the buffer pin.
     400             :      */
     401     4973762 :     LockBuffer(buffer, BUFFER_LOCK_SHARE);
     402             : 
     403     4973762 :     dp = BufferGetPage(buffer);
     404     4973762 :     TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
     405     4973748 :     lines = PageGetMaxOffsetNumber(dp);
     406     4973748 :     ntup = 0;
     407             : 
     408             :     /*
     409             :      * If the all-visible flag indicates that all tuples on the page are
     410             :      * visible to everyone, we can skip the per-tuple visibility tests.
     411             :      *
     412             :      * Note: In hot standby, a tuple that's already visible to all
     413             :      * transactions in the master might still be invisible to a read-only
     414             :      * transaction in the standby. We partly handle this problem by tracking
     415             :      * the minimum xmin of visible tuples as the cut-off XID while marking a
     416             :      * page all-visible on master and WAL log that along with the visibility
     417             :      * map SET operation. In hot standby, we wait for (or abort) all
      418             :      * transactions that might not see one or more tuples on the
     419             :      * page. That's how index-only scans work fine in hot standby. A crucial
     420             :      * difference between index-only scans and heap scans is that the
      421             :      * index-only scan completely relies on the visibility map whereas heap
     422             :      * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
     423             :      * the page-level flag can be trusted in the same way, because it might
     424             :      * get propagated somehow without being explicitly WAL-logged, e.g. via a
     425             :      * full page write. Until we can prove that beyond doubt, let's check each
     426             :      * tuple for visibility the hard way.
     427             :      */
     428     4973748 :     all_visible = PageIsAllVisible(dp) && !snapshot->takenDuringRecovery;
     429             : 
     430     4973748 :     for (lineoff = FirstOffsetNumber, lpp = PageGetItemId(dp, lineoff);
     431   244642876 :          lineoff <= lines;
     432   239669128 :          lineoff++, lpp++)
     433             :     {
     434   239669144 :         if (ItemIdIsNormal(lpp))
     435             :         {
     436             :             HeapTupleData loctup;
     437             :             bool        valid;
     438             : 
     439   226515274 :             loctup.t_tableOid = RelationGetRelid(scan->rs_base.rs_rd);
     440   226515274 :             loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
     441   226515274 :             loctup.t_len = ItemIdGetLength(lpp);
     442   226515274 :             ItemPointerSet(&(loctup.t_self), page, lineoff);
     443             : 
     444   226515274 :             if (all_visible)
     445    37034170 :                 valid = true;
     446             :             else
     447   189481104 :                 valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
     448             : 
     449   226515274 :             HeapCheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
     450             :                                                 &loctup, buffer, snapshot);
     451             : 
     452   226515258 :             if (valid)
     453   219232314 :                 scan->rs_vistuples[ntup++] = lineoff;
     454             :         }
     455             :     }
     456             : 
     457     4973732 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
     458             : 
     459             :     Assert(ntup <= MaxHeapTuplesPerPage);
     460     4973732 :     scan->rs_ntuples = ntup;
     461             : }
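After heapgetpage() returns in page-at-a-time mode, the visibility work is already done: rs_ntuples and rs_vistuples[] describe exactly which line pointers on the pinned page are visible. A small hedged sketch of how that result is consumed ("hscan" is a hypothetical HeapScanDesc; this mirrors what heapgettup_pagemode() does below):

    for (int i = 0; i < hscan->rs_ntuples; i++)
    {
        OffsetNumber lineoff = hscan->rs_vistuples[i];  /* known-visible item */

        /* fetch the item at lineoff from the still-pinned rs_cbuf page,
         * without retesting visibility */
    }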
     462             : 
     463             : /* ----------------
     464             :  *      heapgettup - fetch next heap tuple
     465             :  *
     466             :  *      Initialize the scan if not already done; then advance to the next
     467             :  *      tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
     468             :  *      or set scan->rs_ctup.t_data = NULL if no more tuples.
     469             :  *
     470             :  * dir == NoMovementScanDirection means "re-fetch the tuple indicated
     471             :  * by scan->rs_ctup".
     472             :  *
     473             :  * Note: the reason nkeys/key are passed separately, even though they are
     474             :  * kept in the scan descriptor, is that the caller may not want us to check
     475             :  * the scankeys.
     476             :  *
     477             :  * Note: when we fall off the end of the scan in either direction, we
     478             :  * reset rs_inited.  This means that a further request with the same
     479             :  * scan direction will restart the scan, which is a bit odd, but a
     480             :  * request with the opposite scan direction will start a fresh scan
     481             :  * in the proper direction.  The latter is required behavior for cursors,
     482             :  * while the former case is generally undefined behavior in Postgres
     483             :  * so we don't care too much.
     484             :  * ----------------
     485             :  */
     486             : static void
     487    12190706 : heapgettup(HeapScanDesc scan,
     488             :            ScanDirection dir,
     489             :            int nkeys,
     490             :            ScanKey key)
     491             : {
     492    12190706 :     HeapTuple   tuple = &(scan->rs_ctup);
     493    12190706 :     Snapshot    snapshot = scan->rs_base.rs_snapshot;
     494    12190706 :     bool        backward = ScanDirectionIsBackward(dir);
     495             :     BlockNumber page;
     496             :     bool        finished;
     497             :     Page        dp;
     498             :     int         lines;
     499             :     OffsetNumber lineoff;
     500             :     int         linesleft;
     501             :     ItemId      lpp;
     502             : 
     503             :     /*
     504             :      * calculate next starting lineoff, given scan direction
     505             :      */
     506    12190706 :     if (ScanDirectionIsForward(dir))
     507             :     {
     508    12190706 :         if (!scan->rs_inited)
     509             :         {
     510             :             /*
     511             :              * return null immediately if relation is empty
     512             :              */
     513       24236 :             if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
     514             :             {
     515             :                 Assert(!BufferIsValid(scan->rs_cbuf));
     516       19308 :                 tuple->t_data = NULL;
     517       19308 :                 return;
     518             :             }
     519        4928 :             if (scan->rs_base.rs_parallel != NULL)
     520             :             {
     521          40 :                 ParallelBlockTableScanDesc pbscan =
     522             :                 (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
     523             : 
     524          40 :                 table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
     525             :                                                          pbscan);
     526             : 
     527          40 :                 page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
     528             :                                                          pbscan);
     529             : 
     530             :                 /* Other processes might have already finished the scan. */
     531          40 :                 if (page == InvalidBlockNumber)
     532             :                 {
     533             :                     Assert(!BufferIsValid(scan->rs_cbuf));
     534           8 :                     tuple->t_data = NULL;
     535           8 :                     return;
     536             :                 }
     537             :             }
     538             :             else
     539        4888 :                 page = scan->rs_startblock; /* first page */
     540        4920 :             heapgetpage((TableScanDesc) scan, page);
     541        4920 :             lineoff = FirstOffsetNumber;    /* first offnum */
     542        4920 :             scan->rs_inited = true;
     543             :         }
     544             :         else
     545             :         {
     546             :             /* continue from previously returned page/tuple */
     547    12166470 :             page = scan->rs_cblock; /* current page */
     548    12166470 :             lineoff =           /* next offnum */
     549    12166470 :                 OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self)));
     550             :         }
     551             : 
     552    12171390 :         LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
     553             : 
     554    12171390 :         dp = BufferGetPage(scan->rs_cbuf);
     555    12171390 :         TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
     556    12171390 :         lines = PageGetMaxOffsetNumber(dp);
     557             :         /* page and lineoff now reference the physically next tid */
     558             : 
     559    12171390 :         linesleft = lines - lineoff + 1;
     560             :     }
     561           0 :     else if (backward)
     562             :     {
     563             :         /* backward parallel scan not supported */
     564             :         Assert(scan->rs_base.rs_parallel == NULL);
     565             : 
     566           0 :         if (!scan->rs_inited)
     567             :         {
     568             :             /*
     569             :              * return null immediately if relation is empty
     570             :              */
     571           0 :             if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
     572             :             {
     573             :                 Assert(!BufferIsValid(scan->rs_cbuf));
     574           0 :                 tuple->t_data = NULL;
     575           0 :                 return;
     576             :             }
     577             : 
     578             :             /*
     579             :              * Disable reporting to syncscan logic in a backwards scan; it's
     580             :              * not very likely anyone else is doing the same thing at the same
     581             :              * time, and much more likely that we'll just bollix things for
     582             :              * forward scanners.
     583             :              */
     584           0 :             scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
     585             :             /* start from last page of the scan */
     586           0 :             if (scan->rs_startblock > 0)
     587           0 :                 page = scan->rs_startblock - 1;
     588             :             else
     589           0 :                 page = scan->rs_nblocks - 1;
     590           0 :             heapgetpage((TableScanDesc) scan, page);
     591             :         }
     592             :         else
     593             :         {
     594             :             /* continue from previously returned page/tuple */
     595           0 :             page = scan->rs_cblock; /* current page */
     596             :         }
     597             : 
     598           0 :         LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
     599             : 
     600           0 :         dp = BufferGetPage(scan->rs_cbuf);
     601           0 :         TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
     602           0 :         lines = PageGetMaxOffsetNumber(dp);
     603             : 
     604           0 :         if (!scan->rs_inited)
     605             :         {
     606           0 :             lineoff = lines;    /* final offnum */
     607           0 :             scan->rs_inited = true;
     608             :         }
     609             :         else
     610             :         {
     611           0 :             lineoff =           /* previous offnum */
     612           0 :                 OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self)));
     613             :         }
     614             :         /* page and lineoff now reference the physically previous tid */
     615             : 
     616           0 :         linesleft = lineoff;
     617             :     }
     618             :     else
     619             :     {
     620             :         /*
     621             :          * ``no movement'' scan direction: refetch prior tuple
     622             :          */
     623           0 :         if (!scan->rs_inited)
     624             :         {
     625             :             Assert(!BufferIsValid(scan->rs_cbuf));
     626           0 :             tuple->t_data = NULL;
     627           0 :             return;
     628             :         }
     629             : 
     630           0 :         page = ItemPointerGetBlockNumber(&(tuple->t_self));
     631           0 :         if (page != scan->rs_cblock)
     632           0 :             heapgetpage((TableScanDesc) scan, page);
     633             : 
     634             :         /* Since the tuple was previously fetched, needn't lock page here */
     635           0 :         dp = BufferGetPage(scan->rs_cbuf);
     636           0 :         TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
     637           0 :         lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
     638           0 :         lpp = PageGetItemId(dp, lineoff);
     639             :         Assert(ItemIdIsNormal(lpp));
     640             : 
     641           0 :         tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
     642           0 :         tuple->t_len = ItemIdGetLength(lpp);
     643             : 
     644           0 :         return;
     645             :     }
     646             : 
     647             :     /*
     648             :      * advance the scan until we find a qualifying tuple or run out of stuff
     649             :      * to scan
     650             :      */
     651    12171390 :     lpp = PageGetItemId(dp, lineoff);
     652             :     for (;;)
     653             :     {
     654    12585634 :         while (linesleft > 0)
     655             :         {
     656    12443510 :             if (ItemIdIsNormal(lpp))
     657             :             {
     658             :                 bool        valid;
     659             : 
     660    12177000 :                 tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
     661    12177000 :                 tuple->t_len = ItemIdGetLength(lpp);
     662    12177000 :                 ItemPointerSet(&(tuple->t_self), page, lineoff);
     663             : 
     664             :                 /*
     665             :                  * if current tuple qualifies, return it.
     666             :                  */
     667    12177000 :                 valid = HeapTupleSatisfiesVisibility(tuple,
     668             :                                                      snapshot,
     669             :                                                      scan->rs_cbuf);
     670             : 
     671    12177000 :                 HeapCheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
     672             :                                                     tuple, scan->rs_cbuf,
     673             :                                                     snapshot);
     674             : 
     675    12177000 :                 if (valid && key != NULL)
     676           0 :                     HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
     677             :                                 nkeys, key, valid);
     678             : 
     679    12177000 :                 if (valid)
     680             :                 {
     681    12166718 :                     LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
     682    12166718 :                     return;
     683             :                 }
     684             :             }
     685             : 
     686             :             /*
     687             :              * otherwise move to the next item on the page
     688             :              */
     689      276792 :             --linesleft;
     690      276792 :             if (backward)
     691             :             {
     692           0 :                 --lpp;          /* move back in this page's ItemId array */
     693           0 :                 --lineoff;
     694             :             }
     695             :             else
     696             :             {
     697      276792 :                 ++lpp;          /* move forward in this page's ItemId array */
     698      276792 :                 ++lineoff;
     699             :             }
     700             :         }
     701             : 
     702             :         /*
     703             :          * if we get here, it means we've exhausted the items on this page and
     704             :          * it's time to move to the next.
     705             :          */
     706      142124 :         LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
     707             : 
     708             :         /*
     709             :          * advance to next/prior page and detect end of scan
     710             :          */
     711      142124 :         if (backward)
     712             :         {
     713           0 :             finished = (page == scan->rs_startblock) ||
     714           0 :                 (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
     715           0 :             if (page == 0)
     716           0 :                 page = scan->rs_nblocks;
     717           0 :             page--;
     718             :         }
     719      142124 :         else if (scan->rs_base.rs_parallel != NULL)
     720             :         {
     721       36324 :             ParallelBlockTableScanDesc pbscan =
     722             :             (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
     723             : 
     724       36324 :             page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
     725             :                                                      pbscan);
     726       36324 :             finished = (page == InvalidBlockNumber);
     727             :         }
     728             :         else
     729             :         {
     730      105800 :             page++;
     731      105800 :             if (page >= scan->rs_nblocks)
     732        4610 :                 page = 0;
     733      207002 :             finished = (page == scan->rs_startblock) ||
     734      101202 :                 (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
     735             : 
     736             :             /*
     737             :              * Report our new scan position for synchronization purposes. We
     738             :              * don't do that when moving backwards, however. That would just
     739             :              * mess up any other forward-moving scanners.
     740             :              *
     741             :              * Note: we do this before checking for end of scan so that the
     742             :              * final state of the position hint is back at the start of the
     743             :              * rel.  That's not strictly necessary, but otherwise when you run
     744             :              * the same query multiple times the starting position would shift
     745             :              * a little bit backwards on every invocation, which is confusing.
     746             :              * We don't guarantee any specific ordering in general, though.
     747             :              */
     748      105800 :             if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
     749           0 :                 ss_report_location(scan->rs_base.rs_rd, page);
     750             :         }
     751             : 
     752             :         /*
     753             :          * return NULL if we've exhausted all the pages
     754             :          */
     755      142124 :         if (finished)
     756             :         {
     757        4672 :             if (BufferIsValid(scan->rs_cbuf))
     758        4672 :                 ReleaseBuffer(scan->rs_cbuf);
     759        4672 :             scan->rs_cbuf = InvalidBuffer;
     760        4672 :             scan->rs_cblock = InvalidBlockNumber;
     761        4672 :             tuple->t_data = NULL;
     762        4672 :             scan->rs_inited = false;
     763        4672 :             return;
     764             :         }
     765             : 
     766      137452 :         heapgetpage((TableScanDesc) scan, page);
     767             : 
     768      137452 :         LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
     769             : 
     770      137452 :         dp = BufferGetPage(scan->rs_cbuf);
     771      137452 :         TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
     772      137452 :         lines = PageGetMaxOffsetNumber((Page) dp);
     773      137452 :         linesleft = lines;
     774      137452 :         if (backward)
     775             :         {
     776           0 :             lineoff = lines;
     777           0 :             lpp = PageGetItemId(dp, lines);
     778             :         }
     779             :         else
     780             :         {
     781      137452 :             lineoff = FirstOffsetNumber;
     782      137452 :             lpp = PageGetItemId(dp, FirstOffsetNumber);
     783             :         }
     784             :     }
     785             : }
     786             : 
     787             : /* ----------------
     788             :  *      heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
     789             :  *
     790             :  *      Same API as heapgettup, but used in page-at-a-time mode
     791             :  *
     792             :  * The internal logic is much the same as heapgettup's too, but there are some
     793             :  * differences: we do not take the buffer content lock (that only needs to
     794             :  * happen inside heapgetpage), and we iterate through just the tuples listed
     795             :  * in rs_vistuples[] rather than all tuples on the page.  Notice that
     796             :  * lineindex is 0-based, where the corresponding loop variable lineoff in
     797             :  * heapgettup is 1-based.
     798             :  * ----------------
     799             :  */
     800             : static void
     801    68032352 : heapgettup_pagemode(HeapScanDesc scan,
     802             :                     ScanDirection dir,
     803             :                     int nkeys,
     804             :                     ScanKey key)
     805             : {
     806    68032352 :     HeapTuple   tuple = &(scan->rs_ctup);
     807    68032352 :     bool        backward = ScanDirectionIsBackward(dir);
     808             :     BlockNumber page;
     809             :     bool        finished;
     810             :     Page        dp;
     811             :     int         lines;
     812             :     int         lineindex;
     813             :     OffsetNumber lineoff;
     814             :     int         linesleft;
     815             :     ItemId      lpp;
     816             : 
     817             :     /*
     818             :      * calculate next starting lineindex, given scan direction
     819             :      */
     820    68032352 :     if (ScanDirectionIsForward(dir))
     821             :     {
     822    68031876 :         if (!scan->rs_inited)
     823             :         {
     824             :             /*
     825             :              * return null immediately if relation is empty
     826             :              */
     827     1729214 :             if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
     828             :             {
     829             :                 Assert(!BufferIsValid(scan->rs_cbuf));
     830      349042 :                 tuple->t_data = NULL;
     831      349042 :                 return;
     832             :             }
     833     1380172 :             if (scan->rs_base.rs_parallel != NULL)
     834             :             {
     835        1432 :                 ParallelBlockTableScanDesc pbscan =
     836             :                 (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
     837             : 
     838        1432 :                 table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
     839             :                                                          pbscan);
     840             : 
     841        1432 :                 page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
     842             :                                                          pbscan);
     843             : 
     844             :                 /* Other processes might have already finished the scan. */
     845        1432 :                 if (page == InvalidBlockNumber)
     846             :                 {
     847             :                     Assert(!BufferIsValid(scan->rs_cbuf));
     848         624 :                     tuple->t_data = NULL;
     849         624 :                     return;
     850             :                 }
     851             :             }
     852             :             else
     853     1378740 :                 page = scan->rs_startblock; /* first page */
     854     1379548 :             heapgetpage((TableScanDesc) scan, page);
     855     1379518 :             lineindex = 0;
     856     1379518 :             scan->rs_inited = true;
     857             :         }
     858             :         else
     859             :         {
     860             :             /* continue from previously returned page/tuple */
     861    66302662 :             page = scan->rs_cblock; /* current page */
     862    66302662 :             lineindex = scan->rs_cindex + 1;
     863             :         }
     864             : 
     865    67682180 :         dp = BufferGetPage(scan->rs_cbuf);
     866    67682180 :         TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, dp);
     867    67682180 :         lines = scan->rs_ntuples;
     868             :         /* page and lineindex now reference the next visible tid */
     869             : 
     870    67682180 :         linesleft = lines - lineindex;
     871             :     }
     872         476 :     else if (backward)
     873             :     {
     874             :         /* backward parallel scan not supported */
     875             :         Assert(scan->rs_base.rs_parallel == NULL);
     876             : 
     877         476 :         if (!scan->rs_inited)
     878             :         {
     879             :             /*
     880             :              * return null immediately if relation is empty
     881             :              */
     882          40 :             if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
     883             :             {
     884             :                 Assert(!BufferIsValid(scan->rs_cbuf));
     885           0 :                 tuple->t_data = NULL;
     886           0 :                 return;
     887             :             }
     888             : 
     889             :             /*
     890             :              * Disable reporting to syncscan logic in a backwards scan; it's
     891             :              * not very likely anyone else is doing the same thing at the same
     892             :              * time, and much more likely that we'll just bollix things for
     893             :              * forward scanners.
     894             :              */
     895          40 :             scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
     896             :             /* start from last page of the scan */
     897          40 :             if (scan->rs_startblock > 0)
     898           0 :                 page = scan->rs_startblock - 1;
     899             :             else
     900          40 :                 page = scan->rs_nblocks - 1;
     901          40 :             heapgetpage((TableScanDesc) scan, page);
     902             :         }
     903             :         else
     904             :         {
     905             :             /* continue from previously returned page/tuple */
     906         436 :             page = scan->rs_cblock; /* current page */
     907             :         }
     908             : 
     909         476 :         dp = BufferGetPage(scan->rs_cbuf);
     910         476 :         TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, dp);
     911         476 :         lines = scan->rs_ntuples;
     912             : 
     913         476 :         if (!scan->rs_inited)
     914             :         {
     915          40 :             lineindex = lines - 1;
     916          40 :             scan->rs_inited = true;
     917             :         }
     918             :         else
     919             :         {
     920         436 :             lineindex = scan->rs_cindex - 1;
     921             :         }
     922             :         /* page and lineindex now reference the previous visible tid */
     923             : 
     924         476 :         linesleft = lineindex + 1;
     925             :     }
     926             :     else
     927             :     {
     928             :         /*
     929             :          * ``no movement'' scan direction: refetch prior tuple
     930             :          */
     931           0 :         if (!scan->rs_inited)
     932             :         {
     933             :             Assert(!BufferIsValid(scan->rs_cbuf));
     934           0 :             tuple->t_data = NULL;
     935           0 :             return;
     936             :         }
     937             : 
     938           0 :         page = ItemPointerGetBlockNumber(&(tuple->t_self));
     939           0 :         if (page != scan->rs_cblock)
     940           0 :             heapgetpage((TableScanDesc) scan, page);
     941             : 
     942             :         /* Since the tuple was previously fetched, needn't lock page here */
     943           0 :         dp = BufferGetPage(scan->rs_cbuf);
     944           0 :         TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, dp);
     945           0 :         lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
     946           0 :         lpp = PageGetItemId(dp, lineoff);
     947             :         Assert(ItemIdIsNormal(lpp));
     948             : 
     949           0 :         tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
     950           0 :         tuple->t_len = ItemIdGetLength(lpp);
     951             : 
     952             :         /* check that rs_cindex is in sync */
     953             :         Assert(scan->rs_cindex < scan->rs_ntuples);
     954             :         Assert(lineoff == scan->rs_vistuples[scan->rs_cindex]);
     955             : 
     956           0 :         return;
     957             :     }
     958             : 
     959             :     /*
     960             :      * advance the scan until we find a qualifying tuple or run out of stuff
     961             :      * to scan
     962             :      */
     963             :     for (;;)
     964             :     {
     965   213480444 :         while (linesleft > 0)
     966             :         {
     967   209012592 :             lineoff = scan->rs_vistuples[lineindex];
     968   209012592 :             lpp = PageGetItemId(dp, lineoff);
     969             :             Assert(ItemIdIsNormal(lpp));
     970             : 
     971   209012592 :             tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
     972   209012592 :             tuple->t_len = ItemIdGetLength(lpp);
     973   209012592 :             ItemPointerSet(&(tuple->t_self), page, lineoff);
     974             : 
     975             :             /*
     976             :              * if current tuple qualifies, return it.
     977             :              */
     978   209012592 :             if (key != NULL)
     979             :             {
     980             :                 bool        valid;
     981             : 
     982   145132450 :                 HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
     983             :                             nkeys, key, valid);
     984   143204174 :                 if (valid)
     985             :                 {
     986      994818 :                     scan->rs_cindex = lineindex;
     987      994818 :                     return;
     988             :                 }
     989             :             }
     990             :             else
     991             :             {
     992    65808418 :                 scan->rs_cindex = lineindex;
     993    65808418 :                 return;
     994             :             }
     995             : 
     996             :             /*
     997             :              * otherwise move to the next item on the page
     998             :              */
     999   142209356 :             --linesleft;
    1000   142209356 :             if (backward)
    1001           0 :                 --lineindex;
    1002             :             else
    1003   142209356 :                 ++lineindex;
    1004             :         }
    1005             : 
    1006             :         /*
    1007             :          * if we get here, it means we've exhausted the items on this page and
    1008             :          * it's time to move to the next.
    1009             :          */
    1010     4467852 :         if (backward)
    1011             :         {
    1012          60 :             finished = (page == scan->rs_startblock) ||
    1013           0 :                 (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
    1014          60 :             if (page == 0)
    1015          60 :                 page = scan->rs_nblocks;
    1016          60 :             page--;
    1017             :         }
    1018     4467792 :         else if (scan->rs_base.rs_parallel != NULL)
    1019             :         {
    1020       80596 :             ParallelBlockTableScanDesc pbscan =
    1021             :             (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
    1022             : 
    1023       80596 :             page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
    1024             :                                                      pbscan);
    1025       80596 :             finished = (page == InvalidBlockNumber);
    1026             :         }
    1027             :         else
    1028             :         {
    1029     4387196 :             page++;
    1030     4387196 :             if (page >= scan->rs_nblocks)
    1031      878552 :                 page = 0;
    1032     7895840 :             finished = (page == scan->rs_startblock) ||
    1033     3508644 :                 (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
    1034             : 
    1035             :             /*
    1036             :              * Report our new scan position for synchronization purposes. We
    1037             :              * don't do that when moving backwards, however. That would just
    1038             :              * mess up any other forward-moving scanners.
    1039             :              *
    1040             :              * Note: we do this before checking for end of scan so that the
    1041             :              * final state of the position hint is back at the start of the
    1042             :              * rel.  That's not strictly necessary, but otherwise when you run
    1043             :              * the same query multiple times the starting position would shift
    1044             :              * a little bit backwards on every invocation, which is confusing.
    1045             :              * We don't guarantee any specific ordering in general, though.
    1046             :              */
    1047     4387196 :             if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
    1048        3710 :                 ss_report_location(scan->rs_base.rs_rd, page);
    1049             :         }
    1050             : 
    1051             :         /*
    1052             :          * return NULL if we've exhausted all the pages
    1053             :          */
    1054     4467852 :         if (finished)
    1055             :         {
    1056      879420 :             if (BufferIsValid(scan->rs_cbuf))
    1057      879420 :                 ReleaseBuffer(scan->rs_cbuf);
    1058      879420 :             scan->rs_cbuf = InvalidBuffer;
    1059      879420 :             scan->rs_cblock = InvalidBlockNumber;
    1060      879420 :             tuple->t_data = NULL;
    1061      879420 :             scan->rs_inited = false;
    1062      879420 :             return;
    1063             :         }
    1064             : 
    1065     3588432 :         heapgetpage((TableScanDesc) scan, page);
    1066             : 
    1067     3588432 :         dp = BufferGetPage(scan->rs_cbuf);
    1068     3588432 :         TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, dp);
    1069     3588432 :         lines = scan->rs_ntuples;
    1070     3588432 :         linesleft = lines;
    1071     3588432 :         if (backward)
    1072           0 :             lineindex = lines - 1;
    1073             :         else
    1074     3588432 :             lineindex = 0;
    1075             :     }
    1076             : }
    1077             : 
    1078             : 
    1079             : #if defined(DISABLE_COMPLEX_MACRO)
    1080             : /*
    1081             :  * This is formatted so oddly so that the correspondence to the macro
    1082             :  * definition in access/htup_details.h is maintained.
    1083             :  */
    1084             : Datum
    1085             : fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
    1086             :             bool *isnull)
    1087             : {
    1088             :     return (
    1089             :             (attnum) > 0 ?
    1090             :             (
    1091             :              (*(isnull) = false),
    1092             :              HeapTupleNoNulls(tup) ?
    1093             :              (
    1094             :               TupleDescAttr((tupleDesc), (attnum) - 1)->attcacheoff >= 0 ?
    1095             :               (
    1096             :                fetchatt(TupleDescAttr((tupleDesc), (attnum) - 1),
    1097             :                         (char *) (tup)->t_data + (tup)->t_data->t_hoff +
    1098             :                         TupleDescAttr((tupleDesc), (attnum) - 1)->attcacheoff)
    1099             :                )
    1100             :               :
    1101             :               nocachegetattr((tup), (attnum), (tupleDesc))
    1102             :               )
    1103             :              :
    1104             :              (
    1105             :               att_isnull((attnum) - 1, (tup)->t_data->t_bits) ?
    1106             :               (
    1107             :                (*(isnull) = true),
    1108             :                (Datum) NULL
    1109             :                )
    1110             :               :
    1111             :               (
    1112             :                nocachegetattr((tup), (attnum), (tupleDesc))
    1113             :                )
    1114             :               )
    1115             :              )
    1116             :             :
    1117             :             (
    1118             :              (Datum) NULL
    1119             :              )
    1120             :         );
    1121             : }
    1122             : #endif                          /* defined(DISABLE_COMPLEX_MACRO) */
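
A rough usage sketch for fastgetattr() above (normally reached through the macro of the same name in access/htup_details.h); "tuple" and "rel" are hypothetical names, and the int4 interpretation is only for illustration:

    bool    isnull;
    Datum   value;

    /* pull attribute 1 out of an already-fetched tuple of a non-dropped column */
    value = fastgetattr(tuple, 1, RelationGetDescr(rel), &isnull);
    if (!isnull)
        elog(DEBUG1, "first attribute (assumed int4): %d",
             DatumGetInt32(value));
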
    1123             : 
    1124             : 
    1125             : /* ----------------------------------------------------------------
    1126             :  *                   heap access method interface
    1127             :  * ----------------------------------------------------------------
    1128             :  */
    1129             : 
    1130             : 
    1131             : TableScanDesc
    1132      953028 : heap_beginscan(Relation relation, Snapshot snapshot,
    1133             :                int nkeys, ScanKey key,
    1134             :                ParallelTableScanDesc parallel_scan,
    1135             :                uint32 flags)
    1136             : {
    1137             :     HeapScanDesc scan;
    1138             : 
    1139             :     /*
    1140             :      * increment relation ref count while scanning relation
    1141             :      *
    1142             :      * This is just to make really sure the relcache entry won't go away while
    1143             :      * the scan has a pointer to it.  Caller should be holding the rel open
    1144             :      * anyway, so this is redundant in all normal scenarios...
    1145             :      */
    1146      953028 :     RelationIncrementReferenceCount(relation);
    1147             : 
    1148             :     /*
    1149             :      * allocate and initialize scan descriptor
    1150             :      */
    1151      953028 :     scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
    1152             : 
    1153      953028 :     scan->rs_base.rs_rd = relation;
    1154      953028 :     scan->rs_base.rs_snapshot = snapshot;
    1155      953028 :     scan->rs_base.rs_nkeys = nkeys;
    1156      953028 :     scan->rs_base.rs_flags = flags;
    1157      953028 :     scan->rs_base.rs_parallel = parallel_scan;
    1158      953028 :     scan->rs_strategy = NULL;    /* set in initscan */
    1159             : 
    1160             :     /*
    1161             :      * Disable page-at-a-time mode if it's not a MVCC-safe snapshot.
    1162             :      */
    1163      953028 :     if (!(snapshot && IsMVCCSnapshot(snapshot)))
    1164       58864 :         scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
    1165             : 
    1166             :     /*
    1167             :      * For seqscan and sample scans in a serializable transaction, acquire a
    1168             :      * predicate lock on the entire relation. This is required not only to
    1169             :      * lock all the matching tuples, but also to conflict with new insertions
    1170             :      * into the table. In an indexscan, we take page locks on the index pages
    1171             :      * covering the range specified in the scan qual, but in a heap scan there
    1172             :      * is nothing more fine-grained to lock. A bitmap scan is a different
    1173             :      * story: there we have already scanned the index and locked the index
    1174             :      * pages covering the predicate. But in that case we still have to lock
    1175             :      * any matching heap tuples. For sample scan we could optimize the locking
    1176             :      * to be at least page-level granularity, but we'd need to add per-tuple
    1177             :      * locking for that.
    1178             :      */
    1179      953028 :     if (scan->rs_base.rs_flags & (SO_TYPE_SEQSCAN | SO_TYPE_SAMPLESCAN))
    1180             :     {
    1181             :         /*
    1182             :          * Ensure a missing snapshot is noticed reliably, even if the
    1183             :          * isolation mode means predicate locking isn't performed (and
    1184             :          * therefore the snapshot isn't used here).
    1185             :          */
    1186             :         Assert(snapshot);
    1187      895932 :         PredicateLockRelation(relation, snapshot);
    1188             :     }
    1189             : 
    1190             :     /* we only need to set this up once */
    1191      953028 :     scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
    1192             : 
    1193             :     /*
    1194             :      * we do this here instead of in initscan() because heap_rescan also calls
    1195             :      * initscan() and we don't want to allocate memory again
    1196             :      */
    1197      953028 :     if (nkeys > 0)
    1198      683590 :         scan->rs_base.rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
    1199             :     else
    1200      269438 :         scan->rs_base.rs_key = NULL;
    1201             : 
    1202      953028 :     initscan(scan, key, false);
    1203             : 
    1204      953028 :     return (TableScanDesc) scan;
    1205             : }
    1206             : 
    1207             : void
    1208      861462 : heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
    1209             :             bool allow_strat, bool allow_sync, bool allow_pagemode)
    1210             : {
    1211      861462 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1212             : 
    1213      861462 :     if (set_params)
    1214             :     {
    1215          26 :         if (allow_strat)
    1216          26 :             scan->rs_base.rs_flags |= SO_ALLOW_STRAT;
    1217             :         else
    1218           0 :             scan->rs_base.rs_flags &= ~SO_ALLOW_STRAT;
    1219             : 
    1220          26 :         if (allow_sync)
    1221           8 :             scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
    1222             :         else
    1223          18 :             scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
    1224             : 
    1225          26 :         if (allow_pagemode && scan->rs_base.rs_snapshot &&
    1226          26 :             IsMVCCSnapshot(scan->rs_base.rs_snapshot))
    1227          26 :             scan->rs_base.rs_flags |= SO_ALLOW_PAGEMODE;
    1228             :         else
    1229           0 :             scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
    1230             :     }
    1231             : 
    1232             :     /*
    1233             :      * unpin scan buffers
    1234             :      */
    1235      861462 :     if (BufferIsValid(scan->rs_cbuf))
    1236        2924 :         ReleaseBuffer(scan->rs_cbuf);
    1237             : 
    1238             :     /*
    1239             :      * reinitialize scan descriptor
    1240             :      */
    1241      861462 :     initscan(scan, key, true);
    1242      861462 : }
    1243             : 
    1244             : void
    1245      951598 : heap_endscan(TableScanDesc sscan)
    1246             : {
    1247      951598 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1248             : 
    1249             :     /* Note: no locking manipulations needed */
    1250             : 
    1251             :     /*
    1252             :      * unpin scan buffers
    1253             :      */
    1254      951598 :     if (BufferIsValid(scan->rs_cbuf))
    1255      505642 :         ReleaseBuffer(scan->rs_cbuf);
    1256             : 
    1257             :     /*
    1258             :      * decrement relation reference count and free scan descriptor storage
    1259             :      */
    1260      951598 :     RelationDecrementReferenceCount(scan->rs_base.rs_rd);
    1261             : 
    1262      951598 :     if (scan->rs_base.rs_key)
    1263      683562 :         pfree(scan->rs_base.rs_key);
    1264             : 
    1265      951598 :     if (scan->rs_strategy != NULL)
    1266        3500 :         FreeAccessStrategy(scan->rs_strategy);
    1267             : 
    1268      951598 :     if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
    1269      141792 :         UnregisterSnapshot(scan->rs_base.rs_snapshot);
    1270             : 
    1271      951598 :     pfree(scan);
    1272      951598 : }
    1273             : 
    1274             : HeapTuple
    1275    20575740 : heap_getnext(TableScanDesc sscan, ScanDirection direction)
    1276             : {
    1277    20575740 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1278             : 
    1279             :     /*
    1280             :      * This is still widely used directly, without going through table AM, so
    1281             :      * add a safety check.  It's possible we should, at a later point,
    1282             :      * downgrade this to an assert. The reason for checking the AM routine,
    1283             :      * rather than the AM oid, is that this allows writing regression tests
    1284             :      * that create another AM reusing the heap handler.
    1285             :      */
    1286    20575740 :     if (unlikely(sscan->rs_rd->rd_tableam != GetHeapamTableAmRoutine()))
    1287           0 :         ereport(ERROR,
    1288             :                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    1289             :                  errmsg_internal("only heap AM is supported")));
    1290             : 
    1291             :     /* Note: no locking manipulations needed */
    1292             : 
    1293    20575740 :     if (scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
    1294     9088908 :         heapgettup_pagemode(scan, direction,
    1295     9088908 :                             scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
    1296             :     else
    1297    11486832 :         heapgettup(scan, direction,
    1298    11486832 :                    scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
    1299             : 
    1300    20575740 :     if (scan->rs_ctup.t_data == NULL)
    1301      113478 :         return NULL;
    1302             : 
    1303             :     /*
    1304             :      * if we get here it means we have a new current scan tuple, so point to
    1305             :      * the proper return buffer and return the tuple.
    1306             :      */
    1307             : 
    1308    20462262 :     pgstat_count_heap_getnext(scan->rs_base.rs_rd);
    1309             : 
    1310    20462262 :     return &scan->rs_ctup;
    1311             : }
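
A minimal sketch of a sequential scan driven directly through the routines above; in-core code normally goes through the table AM wrappers (table_beginscan() and friends), and "myrel" plus the particular flag choice here are illustrative assumptions rather than code from this file:

    TableScanDesc scan = heap_beginscan(myrel, GetActiveSnapshot(), 0, NULL,
                                        NULL,
                                        SO_TYPE_SEQSCAN | SO_ALLOW_STRAT |
                                        SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE);
    HeapTuple     tuple;
    uint64        ntuples = 0;

    while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
    {
        /* tuple points into a pinned buffer; copy it if it must outlive the scan */
        ntuples++;
    }
    heap_endscan(scan);
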
    1312             : 
    1313             : bool
    1314    59647318 : heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
    1315             : {
    1316    59647318 :     HeapScanDesc scan = (HeapScanDesc) sscan;
    1317             : 
    1318             :     /* Note: no locking manipulations needed */
    1319             : 
    1320    59647318 :     if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
    1321    58943444 :         heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
    1322             :     else
    1323      703874 :         heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
    1324             : 
    1325    59647288 :     if (scan->rs_ctup.t_data == NULL)
    1326             :     {
    1327     1139596 :         ExecClearTuple(slot);
    1328     1139596 :         return false;
    1329             :     }
    1330             : 
    1331             :     /*
    1332             :      * if we get here it means we have a new current scan tuple, so point to
    1333             :      * the proper return buffer and return the tuple.
    1334             :      */
    1335             : 
    1336    58507692 :     pgstat_count_heap_getnext(scan->rs_base.rs_rd);
    1337             : 
    1338    58507692 :     ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
    1339             :                              scan->rs_cbuf);
    1340    58507692 :     return true;
    1341             : }
    1342             : 
    1343             : /*
    1344             :  *  heap_fetch      - retrieve tuple with given tid
    1345             :  *
    1346             :  * On entry, tuple->t_self is the TID to fetch.  We pin the buffer holding
    1347             :  * the tuple, fill in the remaining fields of *tuple, and check the tuple
    1348             :  * against the specified snapshot.
    1349             :  *
    1350             :  * If successful (tuple found and passes snapshot time qual), then *userbuf
    1351             :  * is set to the buffer holding the tuple and true is returned.  The caller
    1352             :  * must unpin the buffer when done with the tuple.
    1353             :  *
    1354             :  * If the tuple is not found (ie, item number references a deleted slot),
    1355             :  * then tuple->t_data is set to NULL and false is returned.
    1356             :  *
    1357             :  * If the tuple is found but fails the time qual check, then false is returned
    1358             :  * but tuple->t_data is left pointing to the tuple.
    1359             :  *
    1360             :  * heap_fetch does not follow HOT chains: only the exact TID requested will
    1361             :  * be fetched.
    1362             :  *
    1363             :  * It is somewhat inconsistent that we ereport() on invalid block number but
    1364             :  * return false on invalid item number.  There are a couple of reasons though.
    1365             :  * One is that the caller can relatively easily check the block number for
    1366             :  * validity, but cannot check the item number without reading the page
    1367             :  * themselves.  Another is that when we are following a t_ctid link, we can be
    1368             :  * reasonably confident that the page number is valid (since VACUUM shouldn't
    1369             :  * truncate off the destination page without having killed the referencing
    1370             :  * tuple first), but the item number might well not be good.
    1371             :  */
    1372             : bool
    1373       15086 : heap_fetch(Relation relation,
    1374             :            Snapshot snapshot,
    1375             :            HeapTuple tuple,
    1376             :            Buffer *userbuf)
    1377             : {
    1378       15086 :     ItemPointer tid = &(tuple->t_self);
    1379             :     ItemId      lp;
    1380             :     Buffer      buffer;
    1381             :     Page        page;
    1382             :     OffsetNumber offnum;
    1383             :     bool        valid;
    1384             : 
    1385             :     /*
    1386             :      * Fetch and pin the appropriate page of the relation.
    1387             :      */
    1388       15086 :     buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
    1389             : 
    1390             :     /*
    1391             :      * Need share lock on buffer to examine tuple commit status.
    1392             :      */
    1393       15086 :     LockBuffer(buffer, BUFFER_LOCK_SHARE);
    1394       15086 :     page = BufferGetPage(buffer);
    1395       15086 :     TestForOldSnapshot(snapshot, relation, page);
    1396             : 
    1397             :     /*
    1398             :      * We'd better check for an out-of-range offnum, in case the page has been
    1399             :      * vacuumed since the TID was obtained.
    1400             :      */
    1401       15086 :     offnum = ItemPointerGetOffsetNumber(tid);
    1402       15086 :     if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
    1403             :     {
    1404           0 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    1405           0 :         ReleaseBuffer(buffer);
    1406           0 :         *userbuf = InvalidBuffer;
    1407           0 :         tuple->t_data = NULL;
    1408           0 :         return false;
    1409             :     }
    1410             : 
    1411             :     /*
    1412             :      * get the item line pointer corresponding to the requested tid
    1413             :      */
    1414       15086 :     lp = PageGetItemId(page, offnum);
    1415             : 
    1416             :     /*
    1417             :      * Must check for deleted tuple.
    1418             :      */
    1419       15086 :     if (!ItemIdIsNormal(lp))
    1420             :     {
    1421           0 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    1422           0 :         ReleaseBuffer(buffer);
    1423           0 :         *userbuf = InvalidBuffer;
    1424           0 :         tuple->t_data = NULL;
    1425           0 :         return false;
    1426             :     }
    1427             : 
    1428             :     /*
    1429             :      * fill in *tuple fields
    1430             :      */
    1431       15086 :     tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
    1432       15086 :     tuple->t_len = ItemIdGetLength(lp);
    1433       15086 :     tuple->t_tableOid = RelationGetRelid(relation);
    1434             : 
    1435             :     /*
    1436             :      * check tuple visibility, then release lock
    1437             :      */
    1438       15086 :     valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
    1439             : 
    1440       15086 :     if (valid)
    1441       15014 :         PredicateLockTID(relation, &(tuple->t_self), snapshot,
    1442       15014 :                          HeapTupleHeaderGetXmin(tuple->t_data));
    1443             : 
    1444       15086 :     HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
    1445             : 
    1446       15086 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    1447             : 
    1448       15086 :     if (valid)
    1449             :     {
    1450             :         /*
    1451             :          * All checks passed, so return the tuple as valid. Caller is now
    1452             :          * responsible for releasing the buffer.
    1453             :          */
    1454       15014 :         *userbuf = buffer;
    1455             : 
    1456       15014 :         return true;
    1457             :     }
    1458             : 
    1459             :     /* Tuple failed time qual */
    1460          72 :     ReleaseBuffer(buffer);
    1461          72 :     *userbuf = InvalidBuffer;
    1462             : 
    1463          72 :     return false;
    1464             : }
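
A hedged sketch of a caller honoring the contract described above; "rel", "snapshot", and "tid" stand in for values the caller already has:

    HeapTupleData tuple;
    Buffer        buf;

    tuple.t_self = *tid;            /* TID to fetch */
    if (heap_fetch(rel, snapshot, &tuple, &buf))
    {
        /* tuple is visible under snapshot; buf is pinned on our behalf */
        elog(DEBUG1, "fetched tuple of length %u", tuple.t_len);
        ReleaseBuffer(buf);         /* caller must unpin when done */
    }
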
    1465             : 
    1466             : /*
    1467             :  *  heap_hot_search_buffer  - search HOT chain for tuple satisfying snapshot
    1468             :  *
    1469             :  * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
    1470             :  * of a HOT chain), and buffer is the buffer holding this tuple.  We search
    1471             :  * for the first chain member satisfying the given snapshot.  If one is
    1472             :  * found, we update *tid to reference that tuple's offset number, and
    1473             :  * return true.  If no match, return false without modifying *tid.
    1474             :  *
    1475             :  * heapTuple is a caller-supplied buffer.  When a match is found, we return
    1476             :  * the tuple here, in addition to updating *tid.  If no match is found, the
    1477             :  * contents of this buffer on return are undefined.
    1478             :  *
    1479             :  * If all_dead is not NULL, we check non-visible tuples to see if they are
    1480             :  * globally dead; *all_dead is set true if all members of the HOT chain
    1481             :  * are vacuumable, false if not.
    1482             :  *
    1483             :  * Unlike heap_fetch, the caller must already have pin and (at least) share
    1484             :  * lock on the buffer; it is still pinned/locked at exit.  Also unlike
    1485             :  * heap_fetch, we do not report any pgstats count; caller may do so if wanted.
    1486             :  */
    1487             : bool
    1488    25578842 : heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
    1489             :                        Snapshot snapshot, HeapTuple heapTuple,
    1490             :                        bool *all_dead, bool first_call)
    1491             : {
    1492    25578842 :     Page        dp = (Page) BufferGetPage(buffer);
    1493    25578842 :     TransactionId prev_xmax = InvalidTransactionId;
    1494             :     BlockNumber blkno;
    1495             :     OffsetNumber offnum;
    1496             :     bool        at_chain_start;
    1497             :     bool        valid;
    1498             :     bool        skip;
    1499             : 
    1500             :     /* If this is not the first call, previous call returned a (live!) tuple */
    1501    25578842 :     if (all_dead)
    1502    23797534 :         *all_dead = first_call;
    1503             : 
    1504    25578842 :     blkno = ItemPointerGetBlockNumber(tid);
    1505    25578842 :     offnum = ItemPointerGetOffsetNumber(tid);
    1506    25578842 :     at_chain_start = first_call;
    1507    25578842 :     skip = !first_call;
    1508             : 
    1509             :     Assert(TransactionIdIsValid(RecentGlobalXmin));
    1510             :     Assert(BufferGetBlockNumber(buffer) == blkno);
    1511             : 
    1512             :     /* Scan through possible multiple members of HOT-chain */
    1513             :     for (;;)
    1514      774964 :     {
    1515             :         ItemId      lp;
    1516             : 
    1517             :         /* check for bogus TID */
    1518    26353806 :         if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
    1519             :             break;
    1520             : 
    1521    26353806 :         lp = PageGetItemId(dp, offnum);
    1522             : 
    1523             :         /* check for unused, dead, or redirected items */
    1524    26353806 :         if (!ItemIdIsNormal(lp))
    1525             :         {
    1526             :             /* We should only see a redirect at start of chain */
    1527      358226 :             if (ItemIdIsRedirected(lp) && at_chain_start)
    1528             :             {
    1529             :                 /* Follow the redirect */
    1530      246104 :                 offnum = ItemIdGetRedirect(lp);
    1531      246104 :                 at_chain_start = false;
    1532      246104 :                 continue;
    1533             :             }
    1534             :             /* else must be end of chain */
    1535      112122 :             break;
    1536             :         }
    1537             : 
    1538             :         /*
    1539             :          * Update heapTuple to point to the element of the HOT chain we're
    1540             :          * currently investigating. Having t_self set correctly is important
    1541             :          * because the SSI checks and the *Satisfies routine for historical
    1542             :          * MVCC snapshots need the correct tid to decide about the visibility.
    1543             :          */
    1544    25995580 :         heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
    1545    25995580 :         heapTuple->t_len = ItemIdGetLength(lp);
    1546    25995580 :         heapTuple->t_tableOid = RelationGetRelid(relation);
    1547    25995580 :         ItemPointerSet(&heapTuple->t_self, blkno, offnum);
    1548             : 
    1549             :         /*
    1550             :          * Shouldn't see a HEAP_ONLY tuple at chain start.
    1551             :          */
    1552    25995580 :         if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
    1553           0 :             break;
    1554             : 
    1555             :         /*
    1556             :          * The xmin should match the previous xmax value, else chain is
    1557             :          * broken.
    1558             :          */
    1559    26524440 :         if (TransactionIdIsValid(prev_xmax) &&
    1560      528860 :             !TransactionIdEquals(prev_xmax,
    1561             :                                  HeapTupleHeaderGetXmin(heapTuple->t_data)))
    1562           0 :             break;
    1563             : 
    1564             :         /*
    1565             :          * When first_call is true (and thus, skip is initially false) we'll
    1566             :          * return the first tuple we find.  But on later passes, heapTuple
    1567             :          * will initially be pointing to the tuple we returned last time.
    1568             :          * Returning it again would be incorrect (and would loop forever), so
    1569             :          * we skip it and return the next match we find.
    1570             :          */
    1571    25995580 :         if (!skip)
    1572             :         {
    1573             :             /* If it's visible per the snapshot, we must return it */
    1574    25852606 :             valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
    1575    25852606 :             HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
    1576             :                                                 buffer, snapshot);
    1577             : 
    1578    25852600 :             if (valid)
    1579             :             {
    1580    17397660 :                 ItemPointerSetOffsetNumber(tid, offnum);
    1581    17397660 :                 PredicateLockTID(relation, &heapTuple->t_self, snapshot,
    1582    17397660 :                                  HeapTupleHeaderGetXmin(heapTuple->t_data));
    1583    17397660 :                 if (all_dead)
    1584    15727144 :                     *all_dead = false;
    1585    17397660 :                 return true;
    1586             :             }
    1587             :         }
    1588     8597914 :         skip = false;
    1589             : 
    1590             :         /*
    1591             :          * If we can't see it, maybe no one else can either.  At caller
    1592             :          * request, check whether all chain members are dead to all
    1593             :          * transactions.
    1594             :          *
    1595             :          * Note: if you change the criterion here for what is "dead", fix the
    1596             :          * planner's get_actual_variable_range() function to match.
    1597             :          */
    1598     8597914 :         if (all_dead && *all_dead &&
    1599     8255758 :             !HeapTupleIsSurelyDead(heapTuple, RecentGlobalXmin))
    1600     7994074 :             *all_dead = false;
    1601             : 
    1602             :         /*
    1603             :          * Check to see if HOT chain continues past this tuple; if so fetch
    1604             :          * the next offnum and loop around.
    1605             :          */
    1606     8597914 :         if (HeapTupleIsHotUpdated(heapTuple))
    1607             :         {
    1608             :             Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
    1609             :                    blkno);
    1610      528860 :             offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
    1611      528860 :             at_chain_start = false;
    1612      528860 :             prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
    1613             :         }
    1614             :         else
    1615             :             break;              /* end of chain */
    1616             :     }
    1617             : 
    1618     8181176 :     return false;
    1619             : }
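
A rough caller-side sketch of the pin-and-lock protocol heap_hot_search_buffer() expects; names such as "rel", "snapshot", and "tid" are placeholders, not code from this file:

    HeapTupleData heaptup;
    bool          all_dead;
    bool          found;
    Buffer        buf = ReadBuffer(rel, ItemPointerGetBlockNumber(tid));

    LockBuffer(buf, BUFFER_LOCK_SHARE);
    found = heap_hot_search_buffer(tid, rel, buf, snapshot,
                                   &heaptup, &all_dead, true);
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    /* on success, *tid has been updated to the visible chain member */
    ReleaseBuffer(buf);
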
    1620             : 
    1621             : /*
    1622             :  *  heap_get_latest_tid -  get the latest tid of a specified tuple
    1623             :  *
    1624             :  * Actually, this gets the latest version that is visible according to the
    1625             :  * scan's snapshot.  Create a scan using SnapshotDirty to get the very latest,
    1626             :  * possibly uncommitted version.
    1627             :  *
    1628             :  * *tid is both an input and an output parameter: it is updated to
    1629             :  * show the latest version of the row.  Note that it will not be changed
    1630             :  * if no version of the row passes the snapshot test.
    1631             :  */
    1632             : void
    1633         180 : heap_get_latest_tid(TableScanDesc sscan,
    1634             :                     ItemPointer tid)
    1635             : {
    1636         180 :     Relation    relation = sscan->rs_rd;
    1637         180 :     Snapshot    snapshot = sscan->rs_snapshot;
    1638             :     ItemPointerData ctid;
    1639             :     TransactionId priorXmax;
    1640             : 
    1641             :     /*
    1642             :      * table_get_latest_tid verified that the passed-in tid is valid.  Assume,
    1643             :      * however, that t_ctid links are valid - there shouldn't be invalid ones
    1644             :      * in the table.
    1645             :      */
    1646             :     Assert(ItemPointerIsValid(tid));
    1647             : 
    1648             :     /*
    1649             :      * Loop to chase down t_ctid links.  At top of loop, ctid is the tuple we
    1650             :      * need to examine, and *tid is the TID we will return if ctid turns out
    1651             :      * to be bogus.
    1652             :      *
    1653             :      * Note that we will loop until we reach the end of the t_ctid chain.
    1654             :      * Depending on the snapshot passed, there might be at most one visible
    1655             :      * version of the row, but we don't try to optimize for that.
    1656             :      */
    1657         180 :     ctid = *tid;
    1658         180 :     priorXmax = InvalidTransactionId;   /* cannot check first XMIN */
    1659             :     for (;;)
    1660          60 :     {
    1661             :         Buffer      buffer;
    1662             :         Page        page;
    1663             :         OffsetNumber offnum;
    1664             :         ItemId      lp;
    1665             :         HeapTupleData tp;
    1666             :         bool        valid;
    1667             : 
    1668             :         /*
    1669             :          * Read, pin, and lock the page.
    1670             :          */
    1671         240 :         buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
    1672         240 :         LockBuffer(buffer, BUFFER_LOCK_SHARE);
    1673         240 :         page = BufferGetPage(buffer);
    1674         240 :         TestForOldSnapshot(snapshot, relation, page);
    1675             : 
    1676             :         /*
    1677             :          * Check for bogus item number.  This is not treated as an error
    1678             :          * condition because it can happen while following a t_ctid link. We
    1679             :          * just assume that the prior tid is OK and return it unchanged.
    1680             :          */
    1681         240 :         offnum = ItemPointerGetOffsetNumber(&ctid);
    1682         240 :         if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
    1683             :         {
    1684           0 :             UnlockReleaseBuffer(buffer);
    1685           0 :             break;
    1686             :         }
    1687         240 :         lp = PageGetItemId(page, offnum);
    1688         240 :         if (!ItemIdIsNormal(lp))
    1689             :         {
    1690           0 :             UnlockReleaseBuffer(buffer);
    1691           0 :             break;
    1692             :         }
    1693             : 
    1694             :         /* OK to access the tuple */
    1695         240 :         tp.t_self = ctid;
    1696         240 :         tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
    1697         240 :         tp.t_len = ItemIdGetLength(lp);
    1698         240 :         tp.t_tableOid = RelationGetRelid(relation);
    1699             : 
    1700             :         /*
    1701             :          * After following a t_ctid link, we might arrive at an unrelated
    1702             :          * tuple.  Check for XMIN match.
    1703             :          */
    1704         300 :         if (TransactionIdIsValid(priorXmax) &&
    1705          60 :             !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
    1706             :         {
    1707           0 :             UnlockReleaseBuffer(buffer);
    1708           0 :             break;
    1709             :         }
    1710             : 
    1711             :         /*
    1712             :          * Check tuple visibility; if visible, set it as the new result
    1713             :          * candidate.
    1714             :          */
    1715         240 :         valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
    1716         240 :         HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
    1717         240 :         if (valid)
    1718         164 :             *tid = ctid;
    1719             : 
    1720             :         /*
    1721             :          * If there's a valid t_ctid link, follow it, else we're done.
    1722             :          */
    1723         348 :         if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
    1724         108 :             HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
    1725         152 :             HeapTupleHeaderIndicatesMovedPartitions(tp.t_data) ||
    1726          76 :             ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
    1727             :         {
    1728         180 :             UnlockReleaseBuffer(buffer);
    1729         180 :             break;
    1730             :         }
    1731             : 
    1732          60 :         ctid = tp.t_data->t_ctid;
    1733          60 :         priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
    1734          60 :         UnlockReleaseBuffer(buffer);
    1735             :     }                           /* end of loop */
    1736         180 : }
    1737             : 
    1738             : 
    1739             : /*
    1740             :  * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
    1741             :  *
    1742             :  * This is called after we have waited for the XMAX transaction to terminate.
    1743             :  * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
    1744             :  * be set on exit.  If the transaction committed, we set the XMAX_COMMITTED
    1745             :  * hint bit if possible --- but beware that that may not yet be possible,
    1746             :  * if the transaction committed asynchronously.
    1747             :  *
    1748             :  * Note that if the transaction was a locker only, we set HEAP_XMAX_INVALID
    1749             :  * even if it commits.
    1750             :  *
    1751             :  * Hence callers should look only at XMAX_INVALID.
    1752             :  *
    1753             :  * Note this is not allowed for tuples whose xmax is a multixact.
    1754             :  */
    1755             : static void
    1756         250 : UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
    1757             : {
    1758             :     Assert(TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple), xid));
    1759             :     Assert(!(tuple->t_infomask & HEAP_XMAX_IS_MULTI));
    1760             : 
    1761         250 :     if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
    1762             :     {
    1763         444 :         if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
    1764         194 :             TransactionIdDidCommit(xid))
    1765         144 :             HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
    1766             :                                  xid);
    1767             :         else
    1768         106 :             HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
    1769             :                                  InvalidTransactionId);
    1770             :     }
    1771         250 : }
    1772             : 
    1773             : 
    1774             : /*
    1775             :  * GetBulkInsertState - prepare status object for a bulk insert
    1776             :  */
    1777             : BulkInsertState
    1778        2372 : GetBulkInsertState(void)
    1779             : {
    1780             :     BulkInsertState bistate;
    1781             : 
    1782        2372 :     bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
    1783        2372 :     bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
    1784        2372 :     bistate->current_buf = InvalidBuffer;
    1785        2372 :     return bistate;
    1786             : }
    1787             : 
    1788             : /*
    1789             :  * FreeBulkInsertState - clean up after finishing a bulk insert
    1790             :  */
    1791             : void
    1792        2302 : FreeBulkInsertState(BulkInsertState bistate)
    1793             : {
    1794        2302 :     if (bistate->current_buf != InvalidBuffer)
    1795        1892 :         ReleaseBuffer(bistate->current_buf);
    1796        2302 :     FreeAccessStrategy(bistate->strategy);
    1797        2302 :     pfree(bistate);
    1798        2302 : }
    1799             : 
    1800             : /*
    1801             :  * ReleaseBulkInsertStatePin - release a buffer currently held in bistate
    1802             :  */
    1803             : void
    1804      100232 : ReleaseBulkInsertStatePin(BulkInsertState bistate)
    1805             : {
    1806      100232 :     if (bistate->current_buf != InvalidBuffer)
    1807          32 :         ReleaseBuffer(bistate->current_buf);
    1808      100232 :     bistate->current_buf = InvalidBuffer;
    1809      100232 : }
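
A short sketch of the intended bulk-insert pattern around the three helpers above; "rel", "tuples", and "ntuples" are placeholders, and in-core callers normally reach heap_insert() through the table AM layer rather than calling it directly:

    BulkInsertState bistate = GetBulkInsertState();

    for (int i = 0; i < ntuples; i++)
        heap_insert(rel, tuples[i], GetCurrentCommandId(true),
                    0 /* no HEAP_INSERT_* options */, bistate);

    FreeBulkInsertState(bistate);
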
    1810             : 
    1811             : 
    1812             : /*
    1813             :  *  heap_insert     - insert tuple into a heap
    1814             :  *
    1815             :  * The new tuple is stamped with current transaction ID and the specified
    1816             :  * command ID.
    1817             :  *
    1818             :  * See table_tuple_insert for comments about most of the input flags, except
    1819             :  * that this routine directly takes a tuple rather than a slot.
    1820             :  *
    1821             :  * There's corresponding HEAP_INSERT_ options to all the TABLE_INSERT_
    1822             :  * options, and there additionally is HEAP_INSERT_SPECULATIVE which is used to
    1823             :  * implement table_tuple_insert_speculative().
    1824             :  *
    1825             :  * On return the header fields of *tup are updated to match the stored tuple;
    1826             :  * in particular tup->t_self receives the actual TID where the tuple was
    1827             :  * stored.  But note that any toasting of fields within the tuple data is NOT
    1828             :  * reflected into *tup.
    1829             :  */
    1830             : void
    1831    18837388 : heap_insert(Relation relation, HeapTuple tup, CommandId cid,
    1832             :             int options, BulkInsertState bistate)
    1833             : {
    1834    18837388 :     TransactionId xid = GetCurrentTransactionId();
    1835             :     HeapTuple   heaptup;
    1836             :     Buffer      buffer;
    1837    18837388 :     Buffer      vmbuffer = InvalidBuffer;
    1838    18837388 :     bool        all_visible_cleared = false;
    1839             : 
    1840             :     /*
    1841             :      * Fill in tuple header fields and toast the tuple if necessary.
    1842             :      *
    1843             :      * Note: below this point, heaptup is the data we actually intend to store
    1844             :      * into the relation; tup is the caller's original untoasted data.
    1845             :      */
    1846    18837388 :     heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
    1847             : 
    1848             :     /*
    1849             :      * Find buffer to insert this tuple into.  If the page is all visible,
    1850             :      * this will also pin the requisite visibility map page.
    1851             :      */
    1852    18837388 :     buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
    1853             :                                        InvalidBuffer, options, bistate,
    1854             :                                        &vmbuffer, NULL);
    1855             : 
    1856             :     /*
    1857             :      * We're about to do the actual insert -- but check for conflict first, to
    1858             :      * avoid possibly having to roll back work we've just done.
    1859             :      *
    1860             :      * This is safe without a recheck as long as there is no possibility of
    1861             :      * another process scanning the page between this check and the insert
    1862             :      * being visible to the scan (i.e., an exclusive buffer content lock is
    1863             :      * continuously held from this point until the tuple insert is visible).
    1864             :      *
    1865             :      * For a heap insert, we only need to check for table-level SSI locks. Our
    1866             :      * new tuple can't possibly conflict with existing tuple locks, and heap
    1867             :      * page locks are only consolidated versions of tuple locks; they do not
    1868             :      * lock "gaps" as index page locks do.  So we don't need to specify a
    1869             :      * buffer when making the call, which makes for a faster check.
    1870             :      */
    1871    18837388 :     CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
    1872             : 
    1873             :     /* NO EREPORT(ERROR) from here till changes are logged */
    1874    18837366 :     START_CRIT_SECTION();
    1875             : 
    1876    18837366 :     RelationPutHeapTuple(relation, buffer, heaptup,
    1877    18837366 :                          (options & HEAP_INSERT_SPECULATIVE) != 0);
    1878             : 
    1879    18837366 :     if (PageIsAllVisible(BufferGetPage(buffer)))
    1880             :     {
    1881        8888 :         all_visible_cleared = true;
    1882        8888 :         PageClearAllVisible(BufferGetPage(buffer));
    1883       17776 :         visibilitymap_clear(relation,
    1884        8888 :                             ItemPointerGetBlockNumber(&(heaptup->t_self)),
    1885             :                             vmbuffer, VISIBILITYMAP_VALID_BITS);
    1886             :     }
    1887             : 
    1888             :     /*
    1889             :      * XXX Should we set PageSetPrunable on this page ?
    1890             :      *
    1891             :      * The inserting transaction may eventually abort thus making this tuple
    1892             :      * DEAD and hence available for pruning. Though we don't want to optimize
    1893             :      * for aborts, if no other tuple on this page is UPDATEd/DELETEd, the
    1894             :      * aborted tuple will never be pruned until the next vacuum is triggered.
    1895             :      *
    1896             :      * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
    1897             :      */
    1898             : 
    1899    18837366 :     MarkBufferDirty(buffer);
    1900             : 
    1901             :     /* XLOG stuff */
    1902    18837366 :     if (RelationNeedsWAL(relation))
    1903             :     {
    1904             :         xl_heap_insert xlrec;
    1905             :         xl_heap_header xlhdr;
    1906             :         XLogRecPtr  recptr;
    1907    18019958 :         Page        page = BufferGetPage(buffer);
    1908    18019958 :         uint8       info = XLOG_HEAP_INSERT;
    1909    18019958 :         int         bufflags = 0;
    1910             : 
    1911             :         /*
    1912             :          * If this is a catalog, we need to transmit combocids to properly
    1913             :          * decode, so log that as well.
    1914             :          */
    1915    18019958 :         if (RelationIsAccessibleInLogicalDecoding(relation))
    1916       18486 :             log_heap_new_cid(relation, heaptup);
    1917             : 
    1918             :         /*
    1919             :          * If this is the single and first tuple on page, we can reinit the
    1920             :          * page instead of restoring the whole thing.  Set flag, and hide
    1921             :          * buffer references from XLogInsert.
    1922             :          */
    1923    18019958 :         if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
    1924      239592 :             PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
    1925             :         {
    1926      236386 :             info |= XLOG_HEAP_INIT_PAGE;
    1927      236386 :             bufflags |= REGBUF_WILL_INIT;
    1928             :         }
    1929             : 
    1930    18019958 :         xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
    1931    18019958 :         xlrec.flags = 0;
    1932    18019958 :         if (all_visible_cleared)
    1933        8888 :             xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
    1934    18019958 :         if (options & HEAP_INSERT_SPECULATIVE)
    1935        3890 :             xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
    1936             :         Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
    1937             : 
    1938             :         /*
    1939             :          * For logical decoding, we need the tuple even if we're doing a full
    1940             :          * page write, so make sure it's included even if we take a full-page
    1941             :          * image. (XXX We could alternatively store a pointer into the FPW).
    1942             :          */
    1943    18019958 :         if (RelationIsLogicallyLogged(relation) &&
    1944      269728 :             !(options & HEAP_INSERT_NO_LOGICAL))
    1945             :         {
    1946      269692 :             xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
    1947      269692 :             bufflags |= REGBUF_KEEP_DATA;
    1948             :         }
    1949             : 
    1950    18019958 :         XLogBeginInsert();
    1951    18019958 :         XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
    1952             : 
    1953    18019958 :         xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
    1954    18019958 :         xlhdr.t_infomask = heaptup->t_data->t_infomask;
    1955    18019958 :         xlhdr.t_hoff = heaptup->t_data->t_hoff;
    1956             : 
    1957             :         /*
    1958             :          * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
    1959             :          * write the whole page to the xlog, we don't need to store
    1960             :          * xl_heap_header in the xlog.
    1961             :          */
    1962    18019958 :         XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
    1963    18019958 :         XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
    1964             :         /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
    1965    36039916 :         XLogRegisterBufData(0,
    1966    18019958 :                             (char *) heaptup->t_data + SizeofHeapTupleHeader,
    1967    18019958 :                             heaptup->t_len - SizeofHeapTupleHeader);
    1968             : 
    1969             :         /* filtering by origin on a row level is much more efficient */
    1970    18019958 :         XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    1971             : 
    1972    18019958 :         recptr = XLogInsert(RM_HEAP_ID, info);
    1973             : 
    1974    18019958 :         PageSetLSN(page, recptr);
    1975             :     }
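    /*
     * For reference, the record assembled above has two parts: the fixed-size
     * xl_heap_insert struct registered as main data, and the block 0 buffer
     * data holding an xl_heap_header followed by the tuple body (everything
     * past the fixed-size tuple header).  If XLogInsert decides to emit a
     * full-page image, the buffer data would normally be dropped;
     * REGBUF_KEEP_DATA, set above for logical decoding, is what keeps it in
     * that case.
     */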
    1976             : 
    1977    18837366 :     END_CRIT_SECTION();
    1978             : 
    1979    18837366 :     UnlockReleaseBuffer(buffer);
    1980    18837366 :     if (vmbuffer != InvalidBuffer)
    1981        8940 :         ReleaseBuffer(vmbuffer);
    1982             : 
    1983             :     /*
    1984             :      * If tuple is cachable, mark it for invalidation from the caches in case
    1985             :      * we abort.  Note it is OK to do this after releasing the buffer, because
    1986             :      * the heaptup data structure is all in local memory, not in the shared
    1987             :      * buffer.
    1988             :      */
    1989    18837366 :     CacheInvalidateHeapTuple(relation, heaptup, NULL);
    1990             : 
    1991             :     /* Note: speculative insertions are counted too, even if aborted later */
    1992    18837366 :     pgstat_count_heap_insert(relation, 1);
    1993             : 
    1994             :     /*
    1995             :      * If heaptup is a private copy, release it.  Don't forget to copy t_self
    1996             :      * back to the caller's image, too.
    1997             :      */
    1998    18837366 :     if (heaptup != tup)
    1999             :     {
    2000       59136 :         tup->t_self = heaptup->t_self;
    2001       59136 :         heap_freetuple(heaptup);
    2002             :     }
    2003    18837366 : }
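/*
 * A minimal caller-side sketch of driving heap_insert() from a TupleTableSlot,
 * loosely modeled on what a table AM insert callback does.  The function name
 * and the shouldFree handling are illustrative assumptions, not part of
 * heapam.c; the slot routines come from executor/tuptable.h.
 */
static void
example_insert_from_slot(Relation relation, TupleTableSlot *slot,
                         CommandId cid, int options, BulkInsertState bistate)
{
    bool        shouldFree = true;
    HeapTuple   tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);

    /* Make sure the tuple and slot agree on which table they belong to. */
    slot->tts_tableOid = RelationGetRelid(relation);
    tuple->t_tableOid = slot->tts_tableOid;

    /* Do the actual insertion; heap_insert() fills in tuple->t_self. */
    heap_insert(relation, tuple, cid, options, bistate);

    /* Propagate the assigned TID back to the slot for the caller. */
    slot->tts_tid = tuple->t_self;

    if (shouldFree)
        pfree(tuple);
}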
    2004             : 
    2005             : /*
    2006             :  * Subroutine for heap_insert(). Prepares a tuple for insertion. This sets the
    2007             :  * tuple header fields and toasts the tuple if necessary.  Returns a toasted
    2008             :  * version of the tuple if it was toasted, or the original tuple if not. Note
    2009             :  * that in any case, the header fields are also set in the original tuple.
    2010             :  */
    2011             : static HeapTuple
    2012    19992922 : heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
    2013             :                     CommandId cid, int options)
    2014             : {
    2015             :     /*
    2016             :      * Parallel operations are required to be strictly read-only in a parallel
    2017             :      * worker.  Parallel inserts are not safe even in the leader in the
    2018             :      * general case, because group locking means that heavyweight locks for
    2019             :      * relation extension or GIN page locks will not conflict between members
    2020             :      * of a lock group, but we don't prohibit that case here because there are
    2021             :      * useful special cases that we can safely allow, such as CREATE TABLE AS.
    2022             :      */
    2023    19992922 :     if (IsParallelWorker())
    2024           0 :         ereport(ERROR,
    2025             :                 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
    2026             :                  errmsg("cannot insert tuples in a parallel worker")));
    2027             : 
    2028    19992922 :     tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
    2029    19992922 :     tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
    2030    19992922 :     tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
    2031    19992922 :     HeapTupleHeaderSetXmin(tup->t_data, xid);
    2032    19992922 :     if (options & HEAP_INSERT_FROZEN)
    2033         246 :         HeapTupleHeaderSetXminFrozen(tup->t_data);
    2034             : 
    2035    19992922 :     HeapTupleHeaderSetCmin(tup->t_data, cid);
    2036    19992922 :     HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
    2037    19992922 :     tup->t_tableOid = RelationGetRelid(relation);
    2038             : 
    2039             :     /*
    2040             :      * If the new tuple is too big for storage or contains already toasted
    2041             :      * out-of-line attributes from some other relation, invoke the toaster.
    2042             :      */
    2043    19992922 :     if (relation->rd_rel->relkind != RELKIND_RELATION &&
    2044      104496 :         relation->rd_rel->relkind != RELKIND_MATVIEW)
    2045             :     {
    2046             :         /* toast table entries should never be recursively toasted */
    2047             :         Assert(!HeapTupleHasExternal(tup));
    2048      102674 :         return tup;
    2049             :     }
    2050    19890248 :     else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
    2051       59162 :         return heap_toast_insert_or_update(relation, tup, NULL, options);
    2052             :     else
    2053    19831086 :         return tup;
    2054             : }
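/*
 * As a rough sense of scale: with the default 8 kB block size,
 * TOAST_TUPLE_THRESHOLD works out to roughly 2 kB, about a quarter of a page.
 * So a freshly prepared tuple whose t_len exceeds that, or one carrying
 * already-toasted attributes from another relation, takes the
 * heap_toast_insert_or_update() path above, while smaller plain tuples are
 * returned unchanged.
 */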
    2055             : 
    2056             : /*
    2057             :  *  heap_multi_insert   - insert multiple tuples into a heap
    2058             :  *
    2059             :  * This is like heap_insert(), but inserts multiple tuples in one operation.
    2060             :  * That's faster than calling heap_insert() in a loop, because when multiple
    2061             :  * tuples can be inserted on a single page, we can write just a single WAL
    2062             :  * record covering all of them, and only need to lock/unlock the page once.
    2063             :  *
    2064             :  * Note: this leaks memory into the current memory context. You can create a
    2065             :  * temporary context before calling this, if that's a problem.
    2066             :  */
    2067             : void
    2068        1836 : heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
    2069             :                   CommandId cid, int options, BulkInsertState bistate)
    2070             : {
    2071        1836 :     TransactionId xid = GetCurrentTransactionId();
    2072             :     HeapTuple  *heaptuples;
    2073             :     int         i;
    2074             :     int         ndone;
    2075             :     PGAlignedBlock scratch;
    2076             :     Page        page;
    2077             :     bool        needwal;
    2078             :     Size        saveFreeSpace;
    2079        1836 :     bool        need_tuple_data = RelationIsLogicallyLogged(relation);
    2080        1836 :     bool        need_cids = RelationIsAccessibleInLogicalDecoding(relation);
    2081             : 
    2082             :     /* currently not needed (thus unsupported) for heap_multi_insert() */
    2083             :     AssertArg(!(options & HEAP_INSERT_NO_LOGICAL));
    2084             : 
    2085        1836 :     needwal = RelationNeedsWAL(relation);
    2086        1836 :     saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
    2087             :                                                    HEAP_DEFAULT_FILLFACTOR);
    2088             : 
    2089             :     /* Toast and set header data in all the slots */
    2090        1836 :     heaptuples = palloc(ntuples * sizeof(HeapTuple));
    2091     1157370 :     for (i = 0; i < ntuples; i++)
    2092             :     {
    2093             :         HeapTuple   tuple;
    2094             : 
    2095     1155534 :         tuple = ExecFetchSlotHeapTuple(slots[i], true, NULL);
    2096     1155534 :         slots[i]->tts_tableOid = RelationGetRelid(relation);
    2097     1155534 :         tuple->t_tableOid = slots[i]->tts_tableOid;
    2098     1155534 :         heaptuples[i] = heap_prepare_insert(relation, tuple, xid, cid,
    2099             :                                             options);
    2100             :     }
    2101             : 
    2102             :     /*
    2103             :      * We're about to do the actual inserts -- but check for conflict first,
    2104             :      * to minimize the possibility of having to roll back work we've just
    2105             :      * done.
    2106             :      *
    2107             :      * A check here does not definitively prevent a serialization anomaly;
    2108             :      * that check MUST be done at least past the point of acquiring an
    2109             :      * exclusive buffer content lock on every buffer that will be affected,
    2110             :      * and MAY be done after all inserts are reflected in the buffers and
    2111             :      * those locks are released; otherwise there is a race condition.  Since
    2112             :      * multiple buffers can be locked and unlocked in the loop below, and it
    2113             :      * would not be feasible to identify and lock all of those buffers before
    2114             :      * the loop, we must do a final check at the end.
    2115             :      *
    2116             :      * The check here could be omitted with no loss of correctness; it is
    2117             :      * present strictly as an optimization.
    2118             :      *
    2119             :      * For heap inserts, we only need to check for table-level SSI locks. Our
    2120             :      * new tuples can't possibly conflict with existing tuple locks, and heap
    2121             :      * page locks are only consolidated versions of tuple locks; they do not
    2122             :      * lock "gaps" as index page locks do.  So we don't need to specify a
    2123             :      * buffer when making the call, which makes for a faster check.
    2124             :      */
    2125        1836 :     CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
    2126             : 
    2127        1836 :     ndone = 0;
    2128       18454 :     while (ndone < ntuples)
    2129             :     {
    2130             :         Buffer      buffer;
    2131       16618 :         Buffer      vmbuffer = InvalidBuffer;
    2132       16618 :         bool        all_visible_cleared = false;
    2133             :         int         nthispage;
    2134             : 
    2135       16618 :         CHECK_FOR_INTERRUPTS();
    2136             : 
    2137             :         /*
    2138             :          * Find buffer where at least the next tuple will fit.  If the page is
    2139             :          * all-visible, this will also pin the requisite visibility map page.
    2140             :          */
    2141       16618 :         buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
    2142             :                                            InvalidBuffer, options, bistate,
    2143             :                                            &vmbuffer, NULL);
    2144       16618 :         page = BufferGetPage(buffer);
    2145             : 
    2146             :         /* NO EREPORT(ERROR) from here till changes are logged */
    2147       16618 :         START_CRIT_SECTION();
    2148             : 
    2149             :         /*
    2150             :          * RelationGetBufferForTuple has ensured that the first tuple fits.
    2151             :          * Put that on the page, and then as many other tuples as fit.
    2152             :          */
    2153       16618 :         RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
    2154             : 
    2155             :         /*
    2156             :          * Note that heap_multi_insert is not used for catalog tuples yet, but
    2157             :          * this will cover the gap once that is the case.
    2158             :          */
    2159       16618 :         if (needwal && need_cids)
    2160           0 :             log_heap_new_cid(relation, heaptuples[ndone]);
    2161             : 
    2162     1155534 :         for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
    2163             :         {
    2164     1153698 :             HeapTuple   heaptup = heaptuples[ndone + nthispage];
    2165             : 
    2166     1153698 :             if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace)
    2167       14782 :                 break;
    2168             : 
    2169     1138916 :             RelationPutHeapTuple(relation, buffer, heaptup, false);
    2170             : 
    2171             :             /*
    2172             :              * We don't use heap_multi_insert for catalog tuples yet, but
    2173             :              * better be prepared...
    2174             :              */
    2175     1138916 :             if (needwal && need_cids)
    2176           0 :                 log_heap_new_cid(relation, heaptup);
    2177             :         }
    2178             : 
    2179       16618 :         if (PageIsAllVisible(page))
    2180             :         {
    2181           0 :             all_visible_cleared = true;
    2182           0 :             PageClearAllVisible(page);
    2183           0 :             visibilitymap_clear(relation,
    2184             :                                 BufferGetBlockNumber(buffer),
    2185             :                                 vmbuffer, VISIBILITYMAP_VALID_BITS);
    2186             :         }
    2187             : 
    2188             :         /*
    2189             :          * XXX Should we set PageSetPrunable on this page ? See heap_insert()
    2190             :          */
    2191             : 
    2192       16618 :         MarkBufferDirty(buffer);
    2193             : 
    2194             :         /* XLOG stuff */
    2195       16618 :         if (needwal)
    2196             :         {
    2197             :             XLogRecPtr  recptr;
    2198             :             xl_heap_multi_insert *xlrec;
    2199        9338 :             uint8       info = XLOG_HEAP2_MULTI_INSERT;
    2200             :             char       *tupledata;
    2201             :             int         totaldatalen;
    2202        9338 :             char       *scratchptr = scratch.data;
    2203             :             bool        init;
    2204        9338 :             int         bufflags = 0;
    2205             : 
    2206             :             /*
    2207             :              * If the page was previously empty, we can reinit the page
    2208             :              * instead of restoring the whole thing.
    2209             :              */
    2210       17678 :             init = (ItemPointerGetOffsetNumber(&(heaptuples[ndone]->t_self)) == FirstOffsetNumber &&
    2211        8340 :                     PageGetMaxOffsetNumber(page) == FirstOffsetNumber + nthispage - 1);
    2212             : 
    2213             :             /* allocate xl_heap_multi_insert struct from the scratch area */
    2214        9338 :             xlrec = (xl_heap_multi_insert *) scratchptr;
    2215        9338 :             scratchptr += SizeOfHeapMultiInsert;
    2216             : 
    2217             :             /*
    2218             :              * Allocate the offsets array, unless we're reinitializing the
    2219             :              * page: in that case the tuples are stored in order starting at
    2220             :              * FirstOffsetNumber, so we don't need to store the offsets
    2221             :              * explicitly.
    2222             :              */
    2223        9338 :             if (!init)
    2224         998 :                 scratchptr += nthispage * sizeof(OffsetNumber);
    2225             : 
    2226             :             /* the rest of the scratch space is used for tuple data */
    2227        9338 :             tupledata = scratchptr;
    2228             : 
    2229        9338 :             xlrec->flags = all_visible_cleared ? XLH_INSERT_ALL_VISIBLE_CLEARED : 0;
    2230        9338 :             xlrec->ntuples = nthispage;
    2231             : 
    2232             :             /*
    2233             :              * Write out an xl_multi_insert_tuple and the tuple data itself
    2234             :              * for each tuple.
    2235             :              */
    2236      761572 :             for (i = 0; i < nthispage; i++)
    2237             :             {
    2238      752234 :                 HeapTuple   heaptup = heaptuples[ndone + i];
    2239             :                 xl_multi_insert_tuple *tuphdr;
    2240             :                 int         datalen;
    2241             : 
    2242      752234 :                 if (!init)
    2243       30982 :                     xlrec->offsets[i] = ItemPointerGetOffsetNumber(&heaptup->t_self);
    2244             :                 /* xl_multi_insert_tuple needs two-byte alignment. */
    2245      752234 :                 tuphdr = (xl_multi_insert_tuple *) SHORTALIGN(scratchptr);
    2246      752234 :                 scratchptr = ((char *) tuphdr) + SizeOfMultiInsertTuple;
    2247             : 
    2248      752234 :                 tuphdr->t_infomask2 = heaptup->t_data->t_infomask2;
    2249      752234 :                 tuphdr->t_infomask = heaptup->t_data->t_infomask;
    2250      752234 :                 tuphdr->t_hoff = heaptup->t_data->t_hoff;
    2251             : 
    2252             :                 /* write bitmap [+ padding] [+ oid] + data */
    2253      752234 :                 datalen = heaptup->t_len - SizeofHeapTupleHeader;
    2254     1504468 :                 memcpy(scratchptr,
    2255      752234 :                        (char *) heaptup->t_data + SizeofHeapTupleHeader,
    2256             :                        datalen);
    2257      752234 :                 tuphdr->datalen = datalen;
    2258      752234 :                 scratchptr += datalen;
    2259             :             }
    2260        9338 :             totaldatalen = scratchptr - tupledata;
    2261             :             Assert((scratchptr - scratch.data) < BLCKSZ);
    2262             : 
    2263        9338 :             if (need_tuple_data)
    2264          38 :                 xlrec->flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
    2265             : 
    2266             :             /*
    2267             :              * Signal that this is the last xl_heap_multi_insert record
    2268             :              * emitted by this call to heap_multi_insert(). Needed for logical
    2269             :              * decoding so it knows when to cleanup temporary data.
    2270             :              */
    2271        9338 :             if (ndone + nthispage == ntuples)
    2272        1180 :                 xlrec->flags |= XLH_INSERT_LAST_IN_MULTI;
    2273             : 
    2274        9338 :             if (init)
    2275             :             {
    2276        8340 :                 info |= XLOG_HEAP_INIT_PAGE;
    2277        8340 :                 bufflags |= REGBUF_WILL_INIT;
    2278             :             }
    2279             : 
    2280             :             /*
    2281             :              * If we're doing logical decoding, include the new tuple data
    2282             :              * even if we take a full-page image of the page.
    2283             :              */
    2284        9338 :             if (need_tuple_data)
    2285          38 :                 bufflags |= REGBUF_KEEP_DATA;
    2286             : 
    2287        9338 :             XLogBeginInsert();
    2288        9338 :             XLogRegisterData((char *) xlrec, tupledata - scratch.data);
    2289        9338 :             XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
    2290             : 
    2291        9338 :             XLogRegisterBufData(0, tupledata, totaldatalen);
    2292             : 
    2293             :             /* filtering by origin on a row level is much more efficient */
    2294        9338 :             XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    2295             : 
    2296        9338 :             recptr = XLogInsert(RM_HEAP2_ID, info);
    2297             : 
    2298        9338 :             PageSetLSN(page, recptr);
    2299             :         }
    2300             : 
    2301       16618 :         END_CRIT_SECTION();
    2302             : 
    2303       16618 :         UnlockReleaseBuffer(buffer);
    2304       16618 :         if (vmbuffer != InvalidBuffer)
    2305           0 :             ReleaseBuffer(vmbuffer);
    2306             : 
    2307       16618 :         ndone += nthispage;
    2308             :     }
    2309             : 
    2310             :     /*
    2311             :      * We're done with the actual inserts.  Check for conflicts again, to
    2312             :      * ensure that all rw-conflicts in to these inserts are detected.  Without
    2313             :      * this final check, a sequential scan of the heap may have locked the
    2314             :      * table after the "before" check, missing one opportunity to detect the
    2315             :      * conflict, and then scanned the table before the new tuples were there,
    2316             :      * missing the other chance to detect the conflict.
    2317             :      *
    2318             :      * For heap inserts, we only need to check for table-level SSI locks. Our
    2319             :      * new tuples can't possibly conflict with existing tuple locks, and heap
    2320             :      * page locks are only consolidated versions of tuple locks; they do not
    2321             :      * lock "gaps" as index page locks do.  So we don't need to specify a
    2322             :      * buffer when making the call.
    2323             :      */
    2324        1836 :     CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
    2325             : 
    2326             :     /*
    2327             :      * If tuples are cachable, mark them for invalidation from the caches in
    2328             :      * case we abort.  Note it is OK to do this after releasing the buffer,
    2329             :      * because the heaptuples data structure is all in local memory, not in
    2330             :      * the shared buffer.
    2331             :      */
    2332        1836 :     if (IsCatalogRelation(relation))
    2333             :     {
    2334           0 :         for (i = 0; i < ntuples; i++)
    2335           0 :             CacheInvalidateHeapTuple(relation, heaptuples[i], NULL);
    2336             :     }
    2337             : 
    2338             :     /* copy t_self fields back to the caller's slots */
    2339     1157370 :     for (i = 0; i < ntuples; i++)
    2340     1155534 :         slots[i]->tts_tid = heaptuples[i]->t_self;
    2341             : 
    2342        1836 :     pgstat_count_heap_insert(relation, ntuples);
    2343        1836 : }
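/*
 * A minimal sketch of driving heap_multi_insert() directly with ad-hoc slots,
 * in the spirit of a COPY-style batch load.  The function name and the
 * one-slot-per-tuple construction are illustrative assumptions; real callers
 * usually reuse a fixed-size slot array.  The slot routines come from
 * executor/tuptable.h.
 */
static void
example_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
                     CommandId cid)
{
    TupleTableSlot **slots = palloc(ntuples * sizeof(TupleTableSlot *));
    int         i;

    for (i = 0; i < ntuples; i++)
    {
        slots[i] = MakeSingleTupleTableSlot(RelationGetDescr(relation),
                                            &TTSOpsHeapTuple);
        ExecStoreHeapTuple(tuples[i], slots[i], false);
    }

    /* One call covers the whole batch; WAL is grouped per filled page. */
    heap_multi_insert(relation, slots, ntuples, cid, 0, NULL);

    for (i = 0; i < ntuples; i++)
        ExecDropSingleTupleTableSlot(slots[i]);
    pfree(slots);
}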
    2344             : 
    2345             : /*
    2346             :  *  simple_heap_insert - insert a tuple
    2347             :  *
    2348             :  * Currently, this routine differs from heap_insert only in supplying
    2349             :  * a default command ID and not allowing access to the speedup options.
    2350             :  *
    2351             :  * This should be used rather than using heap_insert directly in most places
    2352             :  * where we are modifying system catalogs.
    2353             :  */
    2354             : void
    2355     6516852 : simple_heap_insert(Relation relation, HeapTuple tup)
    2356             : {
    2357     6516852 :     heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
    2358     6516852 : }
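/*
 * A minimal usage sketch for simple_heap_insert(): form a tuple from datums
 * and insert it.  The function name is an illustrative assumption, and the
 * caller is assumed to supply values/isnull arrays matching the relation's
 * tuple descriptor; a real catalog update would also insert index entries
 * afterwards.
 */
static void
example_simple_insert(Relation relation, Datum *values, bool *isnull)
{
    HeapTuple   tuple = heap_form_tuple(RelationGetDescr(relation),
                                        values, isnull);

    simple_heap_insert(relation, tuple);
    heap_freetuple(tuple);
}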
    2359             : 
    2360             : /*
    2361             :  * Given infomask/infomask2, compute the bits that must be saved in the
    2362             :  * "infobits" field of xl_heap_delete, xl_heap_update, xl_heap_lock,
    2363             :  * xl_heap_lock_updated WAL records.
    2364             :  *
    2365             :  * See fix_infomask_from_infobits.
    2366             :  */
    2367             : static uint8
    2368     2090652 : compute_infobits(uint16 infomask, uint16 infomask2)
    2369             : {
    2370             :     return
    2371     4181304 :         ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
    2372     4181304 :         ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
    2373     4181304 :         ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
    2374             :     /* note we ignore HEAP_XMAX_SHR_LOCK here */
    2375     6271956 :         ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
    2376             :         ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
    2377     2090652 :          XLHL_KEYS_UPDATED : 0);
    2378             : }
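/*
 * The WAL redo side reverses this mapping.  A simplified sketch of that
 * inverse is shown below; the function name here is illustrative, the real
 * counterpart being fix_infomask_from_infobits() referenced above.
 */
static inline void
example_infobits_to_infomask(uint8 infobits, uint16 *infomask, uint16 *infomask2)
{
    *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
                   HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_EXCL_LOCK);
    *infomask2 &= ~HEAP_KEYS_UPDATED;

    if (infobits & XLHL_XMAX_IS_MULTI)
        *infomask |= HEAP_XMAX_IS_MULTI;
    if (infobits & XLHL_XMAX_LOCK_ONLY)
        *infomask |= HEAP_XMAX_LOCK_ONLY;
    if (infobits & XLHL_XMAX_EXCL_LOCK)
        *infomask |= HEAP_XMAX_EXCL_LOCK;
    if (infobits & XLHL_XMAX_KEYSHR_LOCK)
        *infomask |= HEAP_XMAX_KEYSHR_LOCK;
    if (infobits & XLHL_KEYS_UPDATED)
        *infomask2 |= HEAP_KEYS_UPDATED;
}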
    2379             : 
    2380             : /*
    2381             :  * Given two versions of the same t_infomask for a tuple, compare them and
    2382             :  * return whether the relevant status for a tuple Xmax has changed.  This is
    2383             :  * used after a buffer lock has been released and reacquired: we want to ensure
    2384             :  * that the tuple state continues to be the same it was when we previously
    2385             :  * examined it.
    2386             :  *
    2387             :  * Note the Xmax field itself must be compared separately.
    2388             :  */
    2389             : static inline bool
    2390         444 : xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
    2391             : {
    2392         444 :     const uint16 interesting =
    2393             :     HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
    2394             : 
    2395         444 :     if ((new_infomask & interesting) != (old_infomask & interesting))
    2396          26 :         return true;
    2397             : 
    2398         418 :     return false;
    2399             : }
    2400             : 
    2401             : /*
    2402             :  *  heap_delete - delete a tuple
    2403             :  *
    2404             :  * See table_tuple_delete() for an explanation of the parameters, except that
    2405             :  * this routine directly takes a tuple rather than a slot.
    2406             :  *
    2407             :  * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
    2408             :  * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
    2409             :  * only for TM_SelfModified, since we cannot obtain cmax from a combocid
    2410             :  * generated by another transaction).
    2411             :  */
    2412             : TM_Result
    2413     1619356 : heap_delete(Relation relation, ItemPointer tid,
    2414             :             CommandId cid, Snapshot crosscheck, bool wait,
    2415             :             TM_FailureData *tmfd, bool changingPart)
    2416             : {
    2417             :     TM_Result   result;
    2418     1619356 :     TransactionId xid = GetCurrentTransactionId();
    2419             :     ItemId      lp;
    2420             :     HeapTupleData tp;
    2421             :     Page        page;
    2422             :     BlockNumber block;
    2423             :     Buffer      buffer;
    2424     1619356 :     Buffer      vmbuffer = InvalidBuffer;
    2425             :     TransactionId new_xmax;
    2426             :     uint16      new_infomask,
    2427             :                 new_infomask2;
    2428     1619356 :     bool        have_tuple_lock = false;
    2429             :     bool        iscombo;
    2430     1619356 :     bool        all_visible_cleared = false;
    2431     1619356 :     HeapTuple   old_key_tuple = NULL;   /* replica identity of the tuple */
    2432     1619356 :     bool        old_key_copied = false;
    2433             : 
    2434             :     Assert(ItemPointerIsValid(tid));
    2435             : 
    2436             :     /*
    2437             :      * Forbid this during a parallel operation, lest it allocate a combocid.
    2438             :      * Other workers might need that combocid for visibility checks, and we
    2439             :      * have no provision for broadcasting it to them.
    2440             :      */
    2441     1619356 :     if (IsInParallelMode())
    2442           0 :         ereport(ERROR,
    2443             :                 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
    2444             :                  errmsg("cannot delete tuples during a parallel operation")));
    2445             : 
    2446     1619356 :     block = ItemPointerGetBlockNumber(tid);
    2447     1619356 :     buffer = ReadBuffer(relation, block);
    2448     1619356 :     page = BufferGetPage(buffer);
    2449             : 
    2450             :     /*
    2451             :      * Before locking the buffer, pin the visibility map page if it appears to
    2452             :      * be necessary.  Since we haven't got the lock yet, someone else might be
    2453             :      * in the middle of changing this, so we'll need to recheck after we have
    2454             :      * the lock.
    2455             :      */
    2456     1619356 :     if (PageIsAllVisible(page))
    2457         694 :         visibilitymap_pin(relation, block, &vmbuffer);
    2458             : 
    2459     1619356 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    2460             : 
    2461             :     /*
    2462             :      * If we didn't pin the visibility map page and the page has become all
    2463             :      * visible while we were busy locking the buffer, we'll have to unlock and
    2464             :      * re-lock, to avoid holding the buffer lock across an I/O.  That's a bit
    2465             :      * unfortunate, but hopefully shouldn't happen often.
    2466             :      */
    2467     1619356 :     if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
    2468             :     {
    2469           0 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    2470           0 :         visibilitymap_pin(relation, block, &vmbuffer);
    2471           0 :         LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    2472             :     }
    2473             : 
    2474     1619356 :     lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
    2475             :     Assert(ItemIdIsNormal(lp));
    2476             : 
    2477     1619356 :     tp.t_tableOid = RelationGetRelid(relation);
    2478     1619356 :     tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
    2479     1619356 :     tp.t_len = ItemIdGetLength(lp);
    2480     1619356 :     tp.t_self = *tid;
    2481             : 
    2482     1619358 : l1:
    2483     1619358 :     result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
    2484             : 
    2485     1619358 :     if (result == TM_Invisible)
    2486             :     {
    2487           0 :         UnlockReleaseBuffer(buffer);
    2488           0 :         ereport(ERROR,
    2489             :                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
    2490             :                  errmsg("attempted to delete invisible tuple")));
    2491             :     }
    2492     1619358 :     else if (result == TM_BeingModified && wait)
    2493             :     {
    2494             :         TransactionId xwait;
    2495             :         uint16      infomask;
    2496             : 
    2497             :         /* must copy state data before unlocking buffer */
    2498         742 :         xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
    2499         742 :         infomask = tp.t_data->t_infomask;
    2500             : 
    2501             :         /*
    2502             :          * Sleep until concurrent transaction ends -- except when there's a
    2503             :          * single locker and it's our own transaction.  Note we don't care
    2504             :          * which lock mode the locker has, because we need the strongest one.
    2505             :          *
    2506             :          * Before sleeping, we need to acquire tuple lock to establish our
    2507             :          * priority for the tuple (see heap_lock_tuple).  LockTuple will
    2508             :          * release us when we are next-in-line for the tuple.
    2509             :          *
    2510             :          * If we are forced to "start over" below, we keep the tuple lock;
    2511             :          * this arranges that we stay at the head of the line while rechecking
    2512             :          * tuple state.
    2513             :          */
    2514         742 :         if (infomask & HEAP_XMAX_IS_MULTI)
    2515             :         {
    2516          16 :             bool        current_is_member = false;
    2517             : 
    2518          16 :             if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
    2519             :                                         LockTupleExclusive, &current_is_member))
    2520             :             {
    2521          16 :                 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    2522             : 
    2523             :                 /*
    2524             :                  * Acquire the lock, if necessary (but skip it when we're
    2525             :                  * requesting a lock and already have one; avoids deadlock).
    2526             :                  */
    2527          16 :                 if (!current_is_member)
    2528          12 :                     heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
    2529             :                                          LockWaitBlock, &have_tuple_lock);
    2530             : 
    2531             :                 /* wait for multixact */
    2532          16 :                 MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
    2533             :                                 relation, &(tp.t_self), XLTW_Delete,
    2534             :                                 NULL);
    2535          16 :                 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    2536             : 
    2537             :                 /*
    2538             :                  * If xwait had just locked the tuple then some other xact
    2539             :                  * could update this tuple before we get to this point.  Check
    2540             :                  * for xmax change, and start over if so.
    2541             :                  */
    2542          16 :                 if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
    2543          16 :                     !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
    2544             :                                          xwait))
    2545           0 :                     goto l1;
    2546             :             }
    2547             : 
    2548             :             /*
    2549             :              * You might think the multixact is necessarily done here, but not
    2550             :              * so: it could have surviving members, namely our own xact or
    2551             :              * other subxacts of this backend.  It is legal for us to delete
    2552             :              * the tuple in either case, however (the latter case is
    2553             :              * essentially a situation of upgrading our former shared lock to
    2554             :              * exclusive).  We don't bother changing the on-disk hint bits
    2555             :              * since we are about to overwrite the xmax altogether.
    2556             :              */
    2557             :         }
    2558         726 :         else if (!TransactionIdIsCurrentTransactionId(xwait))
    2559             :         {
    2560             :             /*
    2561             :              * Wait for regular transaction to end; but first, acquire tuple
    2562             :              * lock.
    2563             :              */
    2564          54 :             LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    2565          54 :             heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
    2566             :                                  LockWaitBlock, &have_tuple_lock);
    2567          54 :             XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
    2568          46 :             LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    2569             : 
    2570             :             /*
    2571             :              * xwait is done, but if xwait had just locked the tuple then some
    2572             :              * other xact could update this tuple before we get to this point.
    2573             :              * Check for xmax change, and start over if so.
    2574             :              */
    2575          46 :             if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
    2576          44 :                 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
    2577             :                                      xwait))
    2578           2 :                 goto l1;
    2579             : 
    2580             :             /* Otherwise check if it committed or aborted */
    2581          44 :             UpdateXmaxHintBits(tp.t_data, buffer, xwait);
    2582             :         }
    2583             : 
    2584             :         /*
    2585             :          * We may overwrite if previous xmax aborted, or if it committed but
    2586             :          * only locked the tuple without updating it.
    2587             :          */
    2588         732 :         if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
    2589         736 :             HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
    2590          28 :             HeapTupleHeaderIsOnlyLocked(tp.t_data))
    2591         712 :             result = TM_Ok;
    2592          20 :         else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid) ||
    2593           4 :                  HeapTupleHeaderIndicatesMovedPartitions(tp.t_data))
    2594          16 :             result = TM_Updated;
    2595             :         else
    2596           4 :             result = TM_Deleted;
    2597             :     }
    2598             : 
    2599     1619348 :     if (crosscheck != InvalidSnapshot && result == TM_Ok)
    2600             :     {
    2601             :         /* Perform additional check for transaction-snapshot mode RI updates */
    2602           0 :         if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
    2603           0 :             result = TM_Updated;
    2604             :     }
    2605             : 
    2606     1619348 :     if (result != TM_Ok)
    2607             :     {
    2608             :         Assert(result == TM_SelfModified ||
    2609             :                result == TM_Updated ||
    2610             :                result == TM_Deleted ||
    2611             :                result == TM_BeingModified);
    2612             :         Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
    2613             :         Assert(result != TM_Updated ||
    2614             :                !ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid));
    2615          44 :         tmfd->ctid = tp.t_data->t_ctid;
    2616          44 :         tmfd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
    2617          44 :         if (result == TM_SelfModified)
    2618          20 :             tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
    2619             :         else
    2620          24 :             tmfd->cmax = InvalidCommandId;
    2621          44 :         UnlockReleaseBuffer(buffer);
    2622          44 :         if (have_tuple_lock)
    2623          20 :             UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
    2624          44 :         if (vmbuffer != InvalidBuffer)
    2625           0 :             ReleaseBuffer(vmbuffer);
    2626          44 :         return result;
    2627             :     }
    2628             : 
    2629             :     /*
    2630             :      * We're about to do the actual delete -- check for conflict first, to
    2631             :      * avoid possibly having to roll back work we've just done.
    2632             :      *
    2633             :      * This is safe without a recheck as long as there is no possibility of
    2634             :      * another process scanning the page between this check and the delete
    2635             :      * being visible to the scan (i.e., an exclusive buffer content lock is
    2636             :      * continuously held from this point until the tuple delete is visible).
    2637             :      */
    2638     1619304 :     CheckForSerializableConflictIn(relation, tid, BufferGetBlockNumber(buffer));
    2639             : 
    2640             :     /* replace cid with a combo cid if necessary */
    2641     1619276 :     HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
    2642             : 
    2643             :     /*
    2644             :      * Compute replica identity tuple before entering the critical section so
    2645             :      * we don't PANIC upon a memory allocation failure.
    2646             :      */
    2647     1619276 :     old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
    2648             : 
    2649             :     /*
    2650             :      * If this is the first possibly-multixact-able operation in the current
    2651             :      * transaction, set my per-backend OldestMemberMXactId setting. We can be
    2652             :      * certain that the transaction will never become a member of any older
    2653             :      * MultiXactIds than that.  (We have to do this even if we end up just
    2654             :      * using our own TransactionId below, since some other backend could
    2655             :      * incorporate our XID into a MultiXact immediately afterwards.)
    2656             :      */
    2657     1619276 :     MultiXactIdSetOldestMember();
    2658             : 
    2659     3238552 :     compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
    2660     1619276 :                               tp.t_data->t_infomask, tp.t_data->t_infomask2,
    2661             :                               xid, LockTupleExclusive, true,
    2662             :                               &new_xmax, &new_infomask, &new_infomask2);
    2663             : 
    2664     1619276 :     START_CRIT_SECTION();
    2665             : 
    2666             :     /*
    2667             :      * If this transaction commits, the tuple will become DEAD sooner or
    2668             :      * later.  Set flag that this page is a candidate for pruning once our xid
    2669             :      * falls below the OldestXmin horizon.  If the transaction finally aborts,
    2670             :      * the subsequent page pruning will be a no-op and the hint will be
    2671             :      * cleared.
    2672             :      */
    2673     1619276 :     PageSetPrunable(page, xid);
    2674             : 
    2675     1619276 :     if (PageIsAllVisible(page))
    2676             :     {
    2677         694 :         all_visible_cleared = true;
    2678         694 :         PageClearAllVisible(page);
    2679         694 :         visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
    2680             :                             vmbuffer, VISIBILITYMAP_VALID_BITS);
    2681             :     }
    2682             : 
    2683             :     /* store transaction information of xact deleting the tuple */
    2684     1619276 :     tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    2685     1619276 :     tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    2686     1619276 :     tp.t_data->t_infomask |= new_infomask;
    2687     1619276 :     tp.t_data->t_infomask2 |= new_infomask2;
    2688     1619276 :     HeapTupleHeaderClearHotUpdated(tp.t_data);
    2689     1619276 :     HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
    2690     1619276 :     HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
    2691             :     /* Make sure there is no forward chain link in t_ctid */
    2692     1619276 :     tp.t_data->t_ctid = tp.t_self;
    2693             : 
    2694             :     /* Signal that this is actually a move into another partition */
    2695     1619276 :     if (changingPart)
    2696         344 :         HeapTupleHeaderSetMovedPartitions(tp.t_data);
    2697             : 
    2698     1619276 :     MarkBufferDirty(buffer);
    2699             : 
    2700             :     /*
    2701             :      * XLOG stuff
    2702             :      *
    2703             :      * NB: heap_abort_speculative() uses the same xlog record and replay
    2704             :      * routines.
    2705             :      */
    2706     1619276 :     if (RelationNeedsWAL(relation))
    2707             :     {
    2708             :         xl_heap_delete xlrec;
    2709             :         xl_heap_header xlhdr;
    2710             :         XLogRecPtr  recptr;
    2711             : 
    2712             :         /* For logical decode we need combocids to properly decode the catalog */
    2713     1618612 :         if (RelationIsAccessibleInLogicalDecoding(relation))
    2714        6760 :             log_heap_new_cid(relation, &tp);
    2715             : 
    2716     1618612 :         xlrec.flags = 0;
    2717     1618612 :         if (all_visible_cleared)
    2718         694 :             xlrec.flags |= XLH_DELETE_ALL_VISIBLE_CLEARED;
    2719     1618612 :         if (changingPart)
    2720         344 :             xlrec.flags |= XLH_DELETE_IS_PARTITION_MOVE;
    2721     3237224 :         xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
    2722     1618612 :                                               tp.t_data->t_infomask2);
    2723     1618612 :         xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
    2724     1618612 :         xlrec.xmax = new_xmax;
    2725             : 
    2726     1618612 :         if (old_key_tuple != NULL)
    2727             :         {
    2728       10994 :             if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
    2729         404 :                 xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE;
    2730             :             else
    2731       10590 :                 xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY;
    2732             :         }
    2733             : 
    2734     1618612 :         XLogBeginInsert();
    2735     1618612 :         XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
    2736             : 
    2737     1618612 :         XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    2738             : 
    2739             :         /*
    2740             :          * Log replica identity of the deleted tuple if there is one
    2741             :          */
    2742     1618612 :         if (old_key_tuple != NULL)
    2743             :         {
    2744       10994 :             xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
    2745       10994 :             xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
    2746       10994 :             xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
    2747             : 
    2748       10994 :             XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
    2749       10994 :             XLogRegisterData((char *) old_key_tuple->t_data
    2750             :                              + SizeofHeapTupleHeader,
    2751       10994 :                              old_key_tuple->t_len
    2752       10994 :                              - SizeofHeapTupleHeader);
    2753             :         }
    2754             : 
    2755             :         /* filtering by origin on a row level is much more efficient */
    2756     1618612 :         XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    2757             : 
    2758     1618612 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
    2759             : 
    2760     1618612 :         PageSetLSN(page, recptr);
    2761             :     }
    2762             : 
    2763     1619276 :     END_CRIT_SECTION();
    2764             : 
    2765     1619276 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    2766             : 
    2767     1619276 :     if (vmbuffer != InvalidBuffer)
    2768         694 :         ReleaseBuffer(vmbuffer);
    2769             : 
    2770             :     /*
    2771             :      * If the tuple has toasted out-of-line attributes, we need to delete
    2772             :      * those items too.  We have to do this before releasing the buffer
    2773             :      * because we need to look at the contents of the tuple, but it's OK to
    2774             :      * release the content lock on the buffer first.
    2775             :      */
    2776     1621232 :     if (relation->rd_rel->relkind != RELKIND_RELATION &&
    2777        1970 :         relation->rd_rel->relkind != RELKIND_MATVIEW)
    2778             :     {
    2779             :         /* toast table entries should never be recursively toasted */
    2780             :         Assert(!HeapTupleHasExternal(&tp));
    2781             :     }
    2782     1617320 :     else if (HeapTupleHasExternal(&tp))
    2783         240 :         heap_toast_delete(relation, &tp, false);
    2784             : 
    2785             :     /*
    2786             :      * Mark tuple for invalidation from system caches at next command
    2787             :      * boundary. We have to do this before releasing the buffer because we
    2788             :      * need to look at the contents of the tuple.
    2789             :      */
    2790     1619276 :     CacheInvalidateHeapTuple(relation, &tp, NULL);
    2791             : 
    2792             :     /* Now we can release the buffer */
    2793     1619276 :     ReleaseBuffer(buffer);
    2794             : 
    2795             :     /*
    2796             :      * Release the lmgr tuple lock, if we had it.
    2797             :      */
    2798     1619276 :     if (have_tuple_lock)
    2799          36 :         UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
    2800             : 
    2801     1619276 :     pgstat_count_heap_delete(relation);
    2802             : 
    2803     1619276 :     if (old_key_tuple != NULL && old_key_copied)
    2804       10592 :         heap_freetuple(old_key_tuple);
    2805             : 
    2806     1619276 :     return TM_Ok;
    2807             : }
    2808             : 
    2809             : /*
    2810             :  *  simple_heap_delete - delete a tuple
    2811             :  *
    2812             :  * This routine may be used to delete a tuple when concurrent updates of
    2813             :  * the target tuple are not expected (for example, because we have a lock
    2814             :  * on the relation associated with the tuple).  Any failure is reported
    2815             :  * via ereport().
    2816             :  */
    2817             : void
    2818      742118 : simple_heap_delete(Relation relation, ItemPointer tid)
    2819             : {
    2820             :     TM_Result   result;
    2821             :     TM_FailureData tmfd;
    2822             : 
    2823      742118 :     result = heap_delete(relation, tid,
    2824             :                          GetCurrentCommandId(true), InvalidSnapshot,
    2825             :                          true /* wait for commit */ ,
    2826             :                          &tmfd, false /* changingPart */ );
    2827      742118 :     switch (result)
    2828             :     {
    2829           0 :         case TM_SelfModified:
    2830             :             /* Tuple was already updated in current command? */
    2831           0 :             elog(ERROR, "tuple already updated by self");
    2832             :             break;
    2833             : 
    2834      742118 :         case TM_Ok:
    2835             :             /* done successfully */
    2836      742118 :             break;
    2837             : 
    2838           0 :         case TM_Updated:
    2839           0 :             elog(ERROR, "tuple concurrently updated");
    2840             :             break;
    2841             : 
    2842           0 :         case TM_Deleted:
    2843           0 :             elog(ERROR, "tuple concurrently deleted");
    2844             :             break;
    2845             : 
    2846           0 :         default:
    2847           0 :             elog(ERROR, "unrecognized heap_delete status: %u", result);
    2848             :             break;
    2849             :     }
    2850      742118 : }
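/*
 * A minimal usage sketch for simple_heap_delete(): remove every tuple a
 * sequential scan returns under the active snapshot.  The function name is an
 * illustrative assumption; per the comment above, the caller must already
 * hold a lock that rules out concurrent updates of the targeted tuples.
 */
static void
example_delete_all(Relation relation)
{
    TableScanDesc scan;
    HeapTuple   tuple;

    scan = table_beginscan(relation, GetActiveSnapshot(), 0, NULL);
    while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
        simple_heap_delete(relation, &tuple->t_self);
    table_endscan(scan);
}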
    2851             : 
    2852             : /*
    2853             :  *  heap_update - replace a tuple
    2854             :  *
    2855             :  * See table_tuple_update() for an explanation of the parameters, except that
    2856             :  * this routine directly takes a tuple rather than a slot.
    2857             :  *
    2858             :  * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
    2859             :  * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
    2860             :  * only for TM_SelfModified, since we cannot obtain cmax from a combocid
    2861             :  * generated by another transaction).
    2862             :  */
    2863             : TM_Result
    2864      338816 : heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
    2865             :             CommandId cid, Snapshot crosscheck, bool wait,
    2866             :             TM_FailureData *tmfd, LockTupleMode *lockmode)
    2867             : {
    2868             :     TM_Result   result;
    2869      338816 :     TransactionId xid = GetCurrentTransactionId();
    2870             :     Bitmapset  *hot_attrs;
    2871             :     Bitmapset  *key_attrs;
    2872             :     Bitmapset  *id_attrs;
    2873             :     Bitmapset  *interesting_attrs;
    2874             :     Bitmapset  *modified_attrs;
    2875             :     ItemId      lp;
    2876             :     HeapTupleData oldtup;
    2877             :     HeapTuple   heaptup;
    2878      338816 :     HeapTuple   old_key_tuple = NULL;
    2879      338816 :     bool        old_key_copied = false;
    2880             :     Page        page;
    2881             :     BlockNumber block;
    2882             :     MultiXactStatus mxact_status;
    2883             :     Buffer      buffer,
    2884             :                 newbuf,
    2885      338816 :                 vmbuffer = InvalidBuffer,
    2886      338816 :                 vmbuffer_new = InvalidBuffer;
    2887             :     bool        need_toast;
    2888             :     Size        newtupsize,
    2889             :                 pagefree;
    2890      338816 :     bool        have_tuple_lock = false;
    2891             :     bool        iscombo;
    2892      338816 :     bool        use_hot_update = false;
    2893      338816 :     bool        hot_attrs_checked = false;
    2894             :     bool        key_intact;
    2895      338816 :     bool        all_visible_cleared = false;
    2896      338816 :     bool        all_visible_cleared_new = false;
    2897             :     bool        checked_lockers;
    2898             :     bool        locker_remains;
    2899             :     TransactionId xmax_new_tuple,
    2900             :                 xmax_old_tuple;
    2901             :     uint16      infomask_old_tuple,
    2902             :                 infomask2_old_tuple,
    2903             :                 infomask_new_tuple,
    2904             :                 infomask2_new_tuple;
    2905             : 
    2906             :     Assert(ItemPointerIsValid(otid));
    2907             : 
    2908             :     /*
    2909             :      * Forbid this during a parallel operation, lest it allocate a combocid.
    2910             :      * Other workers might need that combocid for visibility checks, and we
    2911             :      * have no provision for broadcasting it to them.
    2912             :      */
    2913      338816 :     if (IsInParallelMode())
    2914           0 :         ereport(ERROR,
    2915             :                 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
    2916             :                  errmsg("cannot update tuples during a parallel operation")));
    2917             : 
    2918             :     /*
    2919             :      * Fetch the list of attributes to be checked for various operations.
    2920             :      *
    2921             :      * For HOT considerations, this is wasted effort if we fail to update or
    2922             :      * have to put the new tuple on a different page.  But we must compute the
    2923             :      * list before obtaining buffer lock --- in the worst case, if we are
    2924             :      * doing an update on one of the relevant system catalogs, we could
    2925             :      * deadlock if we try to fetch the list later.  In any case, the relcache
    2926             :      * caches the data so this is usually pretty cheap.
    2927             :      *
    2928             :      * We also need columns used by the replica identity and columns that are
    2929             :      * considered the "key" of rows in the table.
    2930             :      *
    2931             :      * Note that we get copies of each bitmap, so we need not worry about
    2932             :      * relcache flush happening midway through.
    2933             :      */
    2934      338816 :     hot_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_ALL);
    2935      338816 :     key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY);
    2936      338816 :     id_attrs = RelationGetIndexAttrBitmap(relation,
    2937             :                                           INDEX_ATTR_BITMAP_IDENTITY_KEY);
    2938             : 
    2939             : 
    2940      338816 :     block = ItemPointerGetBlockNumber(otid);
    2941      338816 :     buffer = ReadBuffer(relation, block);
    2942      338816 :     page = BufferGetPage(buffer);
    2943             : 
    2944      338816 :     interesting_attrs = NULL;
    2945             : 
    2946             :     /*
    2947             :      * If the page is already full, there is hardly any chance of doing a HOT
    2948             :      * update on this page. It would be wasted effort to look for index
    2949             :      * column updates only to later reject a HOT update for lack of space on
    2950             :      * the same page. So we are conservative and only fetch hot_attrs if the
    2951             :      * page is not already full. Since we are already holding a pin on the
    2952             :      * buffer, there is no chance that the buffer can get cleaned up
    2953             :      * concurrently; and even if that were possible, in the worst case we
    2954             :      * would just lose a chance to do a HOT update.
    2955             :      */
    2956      338816 :     if (!PageIsFull(page))
    2957             :     {
    2958      239112 :         interesting_attrs = bms_add_members(interesting_attrs, hot_attrs);
    2959      239112 :         hot_attrs_checked = true;
    2960             :     }
    2961      338816 :     interesting_attrs = bms_add_members(interesting_attrs, key_attrs);
    2962      338816 :     interesting_attrs = bms_add_members(interesting_attrs, id_attrs);
    2963             : 
    2964             :     /*
    2965             :      * Before locking the buffer, pin the visibility map page if it appears to
    2966             :      * be necessary.  Since we haven't got the lock yet, someone else might be
    2967             :      * in the middle of changing this, so we'll need to recheck after we have
    2968             :      * the lock.
    2969             :      */
    2970      338816 :     if (PageIsAllVisible(page))
    2971        1742 :         visibilitymap_pin(relation, block, &vmbuffer);
    2972             : 
    2973      338816 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    2974             : 
    2975      338816 :     lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
    2976             :     Assert(ItemIdIsNormal(lp));
    2977             : 
    2978             :     /*
    2979             :      * Fill in enough data in oldtup for HeapDetermineModifiedColumns to work
    2980             :      * properly.
    2981             :      */
    2982      338816 :     oldtup.t_tableOid = RelationGetRelid(relation);
    2983      338816 :     oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
    2984      338816 :     oldtup.t_len = ItemIdGetLength(lp);
    2985      338816 :     oldtup.t_self = *otid;
    2986             : 
    2987             :     /* the new tuple is ready, except for this: */
    2988      338816 :     newtup->t_tableOid = RelationGetRelid(relation);
    2989             : 
    2990             :     /* Determine columns modified by the update. */
    2991      338816 :     modified_attrs = HeapDetermineModifiedColumns(relation, interesting_attrs,
    2992             :                                                   &oldtup, newtup);
    2993             : 
    2994             :     /*
    2995             :      * If we're not updating any "key" column, we can grab a weaker lock type.
    2996             :      * This allows for more concurrency when we are running simultaneously
    2997             :      * with foreign key checks.
    2998             :      *
    2999             :      * Note that if a column gets detoasted while executing the update, but
    3000             :      * the value ends up being the same, this test will fail and we will use
    3001             :      * the stronger lock.  This is acceptable; the important case to optimize
    3002             :      * is updates that don't manipulate key columns, not those that
    3003             :      * serendipitously arrive at the same key values.
    3004             :      */
    3005      338816 :     if (!bms_overlap(modified_attrs, key_attrs))
    3006             :     {
    3007      334660 :         *lockmode = LockTupleNoKeyExclusive;
    3008      334660 :         mxact_status = MultiXactStatusNoKeyUpdate;
    3009      334660 :         key_intact = true;
    3010             : 
    3011             :         /*
    3012             :          * If this is the first possibly-multixact-able operation in the
    3013             :          * current transaction, set my per-backend OldestMemberMXactId
    3014             :          * setting. We can be certain that the transaction will never become a
    3015             :          * member of any older MultiXactIds than that.  (We have to do this
    3016             :          * even if we end up just using our own TransactionId below, since
    3017             :          * some other backend could incorporate our XID into a MultiXact
    3018             :          * immediately afterwards.)
    3019             :          */
    3020      334660 :         MultiXactIdSetOldestMember();
    3021             :     }
    3022             :     else
    3023             :     {
    3024        4156 :         *lockmode = LockTupleExclusive;
    3025        4156 :         mxact_status = MultiXactStatusUpdate;
    3026        4156 :         key_intact = false;
    3027             :     }
    3028             : 
    3029             :     /*
    3030             :      * Note: beyond this point, use oldtup not otid to refer to old tuple.
    3031             :      * otid may very well point at newtup->t_self, which we will overwrite
    3032             :      * with the new tuple's location, so there's great risk of confusion if we
    3033             :      * use otid anymore.
    3034             :      */
    3035             : 
    3036      338816 : l2:
    3037      338818 :     checked_lockers = false;
    3038      338818 :     locker_remains = false;
    3039      338818 :     result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
    3040             : 
    3041             :     /* see below about the "no wait" case */
    3042             :     Assert(result != TM_BeingModified || wait);
    3043             : 
    3044      338818 :     if (result == TM_Invisible)
    3045             :     {
    3046           0 :         UnlockReleaseBuffer(buffer);
    3047           0 :         ereport(ERROR,
    3048             :                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
    3049             :                  errmsg("attempted to update invisible tuple")));
    3050             :     }
    3051      338818 :     else if (result == TM_BeingModified && wait)
    3052             :     {
    3053             :         TransactionId xwait;
    3054             :         uint16      infomask;
    3055        7292 :         bool        can_continue = false;
    3056             : 
    3057             :         /*
    3058             :          * XXX note that we don't consider the "no wait" case here.  This
    3059             :          * isn't a problem currently because no caller uses that case, but it
    3060             :          * should be fixed if such a caller is introduced.  It wasn't a
    3061             :          * problem previously because this code would always wait, but now
    3062             :          * that some tuple locks do not conflict with one of the lock modes we
    3063             :          * use, it is possible that this case is interesting to handle
    3064             :          * specially.
    3065             :          *
    3066             :          * This may cause failures with third-party code that calls
    3067             :          * heap_update directly.
    3068             :          */
    3069             : 
    3070             :         /* must copy state data before unlocking buffer */
    3071        7292 :         xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
    3072        7292 :         infomask = oldtup.t_data->t_infomask;
    3073             : 
    3074             :         /*
    3075             :          * Now we have to do something about the existing locker.  If it's a
    3076             :          * multi, sleep on it; we might be awakened before it is completely
    3077             :          * gone (or even not sleep at all in some cases); we need to preserve
    3078             :          * it as locker, unless it is gone completely.
    3079             :          *
    3080             :          * If it's not a multi, we need to check for sleeping conditions
    3081             :          * before actually going to sleep.  If the update doesn't conflict
    3082             :          * with the locks, we just continue without sleeping (but making sure
    3083             :          * it is preserved).
    3084             :          *
    3085             :          * Before sleeping, we need to acquire tuple lock to establish our
    3086             :          * priority for the tuple (see heap_lock_tuple).  LockTuple will
    3087             :          * release us when we are next-in-line for the tuple.  Note we must
    3088             :          * not acquire the tuple lock until we're sure we're going to sleep;
    3089             :          * otherwise we're open for race conditions with other transactions
    3090             :          * holding the tuple lock which sleep on us.
    3091             :          *
    3092             :          * If we are forced to "start over" below, we keep the tuple lock;
    3093             :          * this arranges that we stay at the head of the line while rechecking
    3094             :          * tuple state.
    3095             :          */
    3096        7292 :         if (infomask & HEAP_XMAX_IS_MULTI)
    3097             :         {
    3098             :             TransactionId update_xact;
    3099             :             int         remain;
    3100          84 :             bool        current_is_member = false;
    3101             : 
    3102          84 :             if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
    3103             :                                         *lockmode, &current_is_member))
    3104             :             {
    3105          16 :                 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3106             : 
    3107             :                 /*
    3108             :                  * Acquire the lock, if necessary (but skip it when we're
    3109             :                  * requesting a lock and already have one; avoids deadlock).
    3110             :                  */
    3111          16 :                 if (!current_is_member)
    3112           0 :                     heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
    3113             :                                          LockWaitBlock, &have_tuple_lock);
    3114             : 
    3115             :                 /* wait for multixact */
    3116          16 :                 MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
    3117             :                                 relation, &oldtup.t_self, XLTW_Update,
    3118             :                                 &remain);
    3119          16 :                 checked_lockers = true;
    3120          16 :                 locker_remains = remain != 0;
    3121          16 :                 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3122             : 
    3123             :                 /*
    3124             :                  * If xwait had just locked the tuple then some other xact
    3125             :                  * could update this tuple before we get to this point.  Check
    3126             :                  * for xmax change, and start over if so.
    3127             :                  */
    3128          16 :                 if (xmax_infomask_changed(oldtup.t_data->t_infomask,
    3129          16 :                                           infomask) ||
    3130          16 :                     !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
    3131             :                                          xwait))
    3132           0 :                     goto l2;
    3133             :             }
    3134             : 
    3135             :             /*
    3136             :              * Note that the multixact may not be done by now.  It could have
    3137             :              * surviving members: our own xact or other subxacts of this
    3138             :              * backend, and also any other concurrent transaction that locked
    3139             :              * the tuple with LockTupleKeyShare if we only got
    3140             :              * LockTupleNoKeyExclusive.  If this is the case, we have to be
    3141             :              * careful to mark the updated tuple with the surviving members in
    3142             :              * Xmax.
    3143             :              *
    3144             :              * Note that there could have been another update in the
    3145             :              * MultiXact. In that case, we need to check whether it committed
    3146             :              * or aborted. If it aborted we are safe to update it again;
    3147             :              * otherwise there is an update conflict, and we have to return
    3148             :              * TM_{Deleted, Updated} below.
    3149             :              *
    3150             :              * In the LockTupleExclusive case, we still need to preserve the
    3151             :              * surviving members: those would include the tuple locks we had
    3152             :              * before this one, which are important to keep in case this
    3153             :              * subxact aborts.
    3154             :              */
    3155          84 :             if (!HEAP_XMAX_IS_LOCKED_ONLY(oldtup.t_data->t_infomask))
    3156          16 :                 update_xact = HeapTupleGetUpdateXid(oldtup.t_data);
    3157             :             else
    3158          68 :                 update_xact = InvalidTransactionId;
    3159             : 
    3160             :             /*
    3161             :              * There was no UPDATE in the MultiXact; or it aborted. No
    3162             :              * TransactionIdIsInProgress() call needed here, since we called
    3163             :              * MultiXactIdWait() above.
    3164             :              */
    3165         100 :             if (!TransactionIdIsValid(update_xact) ||
    3166          16 :                 TransactionIdDidAbort(update_xact))
    3167          70 :                 can_continue = true;
    3168             :         }
    3169        7208 :         else if (TransactionIdIsCurrentTransactionId(xwait))
    3170             :         {
    3171             :             /*
    3172             :              * The only locker is ourselves; we can avoid grabbing the tuple
    3173             :              * lock here, but must preserve our locking information.
    3174             :              */
    3175        7072 :             checked_lockers = true;
    3176        7072 :             locker_remains = true;
    3177        7072 :             can_continue = true;
    3178             :         }
    3179         136 :         else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
    3180             :         {
    3181             :             /*
    3182             :              * If it's just a key-share locker, and we're not changing the key
    3183             :              * columns, we don't need to wait for it to end; but we need to
    3184             :              * preserve it as locker.
    3185             :              */
    3186          58 :             checked_lockers = true;
    3187          58 :             locker_remains = true;
    3188          58 :             can_continue = true;
    3189             :         }
    3190             :         else
    3191             :         {
    3192             :             /*
    3193             :              * Wait for regular transaction to end; but first, acquire tuple
    3194             :              * lock.
    3195             :              */
    3196          78 :             LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3197          78 :             heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
    3198             :                                  LockWaitBlock, &have_tuple_lock);
    3199          78 :             XactLockTableWait(xwait, relation, &oldtup.t_self,
    3200             :                               XLTW_Update);
    3201          78 :             checked_lockers = true;
    3202          78 :             LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3203             : 
    3204             :             /*
    3205             :              * xwait is done, but if xwait had just locked the tuple then some
    3206             :              * other xact could update this tuple before we get to this point.
    3207             :              * Check for xmax change, and start over if so.
    3208             :              */
    3209          78 :             if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
    3210          76 :                 !TransactionIdEquals(xwait,
    3211             :                                      HeapTupleHeaderGetRawXmax(oldtup.t_data)))
    3212           2 :                 goto l2;
    3213             : 
    3214             :             /* Otherwise check if it committed or aborted */
    3215          76 :             UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
    3216          76 :             if (oldtup.t_data->t_infomask & HEAP_XMAX_INVALID)
    3217          22 :                 can_continue = true;
    3218             :         }
    3219             : 
    3220        7290 :         if (can_continue)
    3221        7222 :             result = TM_Ok;
    3222          68 :         else if (!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid) ||
    3223           4 :                  HeapTupleHeaderIndicatesMovedPartitions(oldtup.t_data))
    3224          64 :             result = TM_Updated;
    3225             :         else
    3226           4 :             result = TM_Deleted;
    3227             :     }
    3228             : 
    3229      338816 :     if (crosscheck != InvalidSnapshot && result == TM_Ok)
    3230             :     {
    3231             :         /* Perform additional check for transaction-snapshot mode RI updates */
    3232           0 :         if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
    3233             :         {
    3234           0 :             result = TM_Updated;
    3235             :             Assert(!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
    3236             :         }
    3237             :     }
    3238             : 
    3239      338816 :     if (result != TM_Ok)
    3240             :     {
    3241             :         Assert(result == TM_SelfModified ||
    3242             :                result == TM_Updated ||
    3243             :                result == TM_Deleted ||
    3244             :                result == TM_BeingModified);
    3245             :         Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
    3246             :         Assert(result != TM_Updated ||
    3247             :                !ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
    3248         168 :         tmfd->ctid = oldtup.t_data->t_ctid;
    3249         168 :         tmfd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
    3250         168 :         if (result == TM_SelfModified)
    3251          56 :             tmfd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
    3252             :         else
    3253         112 :             tmfd->cmax = InvalidCommandId;
    3254         168 :         UnlockReleaseBuffer(buffer);
    3255         168 :         if (have_tuple_lock)
    3256          54 :             UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
    3257         168 :         if (vmbuffer != InvalidBuffer)
    3258           0 :             ReleaseBuffer(vmbuffer);
    3259         168 :         bms_free(hot_attrs);
    3260         168 :         bms_free(key_attrs);
    3261         168 :         bms_free(id_attrs);
    3262         168 :         bms_free(modified_attrs);
    3263         168 :         bms_free(interesting_attrs);
    3264         168 :         return result;
    3265             :     }
    3266             : 
    3267             :     /*
    3268             :      * If we didn't pin the visibility map page and the page has become all
    3269             :      * visible while we were busy locking the buffer, or during some
    3270             :      * subsequent window during which we had it unlocked, we'll have to unlock
    3271             :      * and re-lock, to avoid holding the buffer lock across an I/O.  That's a
    3272             :      * bit unfortunate, especially since we'll now have to recheck whether the
    3273             :      * tuple has been locked or updated under us, but hopefully it won't
    3274             :      * happen very often.
    3275             :      */
    3276      338648 :     if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
    3277             :     {
    3278           0 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3279           0 :         visibilitymap_pin(relation, block, &vmbuffer);
    3280           0 :         LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3281           0 :         goto l2;
    3282             :     }
    3283             : 
    3284             :     /* Fill in transaction status data */
    3285             : 
    3286             :     /*
    3287             :      * If the tuple we're updating is locked, we need to preserve the locking
    3288             :      * info in the old tuple's Xmax.  Prepare a new Xmax value for this.
    3289             :      */
    3290     1015944 :     compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
    3291      338648 :                               oldtup.t_data->t_infomask,
    3292      338648 :                               oldtup.t_data->t_infomask2,
    3293             :                               xid, *lockmode, true,
    3294             :                               &xmax_old_tuple, &infomask_old_tuple,
    3295             :                               &infomask2_old_tuple);
    3296             : 
    3297             :     /*
    3298             :      * And also prepare an Xmax value for the new copy of the tuple.  If there
    3299             :      * then use InvalidTransactionId; otherwise, get the xmax from the old
    3300             :      * tuple.  (In rare cases that might also be InvalidTransactionId and yet
    3301             :      * not have the HEAP_XMAX_INVALID bit set; that's fine.)
    3302             :      * HEAP_XMAX_INVALID bit set; that's fine.)
    3303             :      */
    3304      338648 :     if ((oldtup.t_data->t_infomask & HEAP_XMAX_INVALID) ||
    3305        7200 :         HEAP_LOCKED_UPGRADED(oldtup.t_data->t_infomask) ||
    3306        7132 :         (checked_lockers && !locker_remains))
    3307      331448 :         xmax_new_tuple = InvalidTransactionId;
    3308             :     else
    3309        7200 :         xmax_new_tuple = HeapTupleHeaderGetRawXmax(oldtup.t_data);
    3310             : 
    3311      338648 :     if (!TransactionIdIsValid(xmax_new_tuple))
    3312             :     {
    3313      331448 :         infomask_new_tuple = HEAP_XMAX_INVALID;
    3314      331448 :         infomask2_new_tuple = 0;
    3315             :     }
    3316             :     else
    3317             :     {
    3318             :         /*
    3319             :          * If we found a valid Xmax for the new tuple, then the infomask bits
    3320             :          * to use on the new tuple depend on what was there on the old one.
    3321             :          * Note that since we're doing an update, the only possibility is that
    3322             :          * the lockers had FOR KEY SHARE lock.
    3323             :          */
    3324        7200 :         if (oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI)
    3325             :         {
    3326          70 :             GetMultiXactIdHintBits(xmax_new_tuple, &infomask_new_tuple,
    3327             :                                    &infomask2_new_tuple);
    3328             :         }
    3329             :         else
    3330             :         {
    3331        7130 :             infomask_new_tuple = HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_LOCK_ONLY;
    3332        7130 :             infomask2_new_tuple = 0;
    3333             :         }
    3334             :     }
    3335             : 
    3336             :     /*
    3337             :      * Prepare the new tuple with the appropriate initial values of Xmin and
    3338             :      * Xmax, as well as initial infomask bits as computed above.
    3339             :      */
    3340      338648 :     newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
    3341      338648 :     newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
    3342      338648 :     HeapTupleHeaderSetXmin(newtup->t_data, xid);
    3343      338648 :     HeapTupleHeaderSetCmin(newtup->t_data, cid);
    3344      338648 :     newtup->t_data->t_infomask |= HEAP_UPDATED | infomask_new_tuple;
    3345      338648 :     newtup->t_data->t_infomask2 |= infomask2_new_tuple;
    3346      338648 :     HeapTupleHeaderSetXmax(newtup->t_data, xmax_new_tuple);
    3347             : 
    3348             :     /*
    3349             :      * Replace cid with a combo cid if necessary.  Note that we already put
    3350             :      * the plain cid into the new tuple.
    3351             :      */
    3352      338648 :     HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);
    3353             : 
    3354             :     /*
    3355             :      * If the toaster needs to be activated, OR if the new tuple will not fit
    3356             :      * on the same page as the old, then we need to release the content lock
    3357             :      * (but not the pin!) on the old tuple's buffer while we are off doing
    3358             :      * TOAST and/or table-file-extension work.  We must mark the old tuple to
    3359             :      * show that it's locked, else other processes may try to update it
    3360             :      * themselves.
    3361             :      *
    3362             :      * We need to invoke the toaster if there are already any out-of-line
    3363             :      * toasted values present, or if the new tuple is over-threshold.
    3364             :      */
    3365      338648 :     if (relation->rd_rel->relkind != RELKIND_RELATION &&
    3366           0 :         relation->rd_rel->relkind != RELKIND_MATVIEW)
    3367             :     {
    3368             :         /* toast table entries should never be recursively toasted */
    3369             :         Assert(!HeapTupleHasExternal(&oldtup));
    3370             :         Assert(!HeapTupleHasExternal(newtup));
    3371           0 :         need_toast = false;
    3372             :     }
    3373             :     else
    3374      338648 :         need_toast = (HeapTupleHasExternal(&oldtup) ||
    3375      676810 :                       HeapTupleHasExternal(newtup) ||
    3376      338162 :                       newtup->t_len > TOAST_TUPLE_THRESHOLD);
    3377             : 
    3378      338648 :     pagefree = PageGetHeapFreeSpace(page);
    3379             : 
    3380      338648 :     newtupsize = MAXALIGN(newtup->t_len);
    3381             : 
    3382      338648 :     if (need_toast || newtupsize > pagefree)
    3383      118974 :     {
    3384             :         TransactionId xmax_lock_old_tuple;
    3385             :         uint16      infomask_lock_old_tuple,
    3386             :                     infomask2_lock_old_tuple;
    3387      118974 :         bool        cleared_all_frozen = false;
    3388             : 
    3389             :         /*
    3390             :          * To prevent concurrent sessions from updating the tuple, we have to
    3391             :          * temporarily mark it locked, while we release the page-level lock.
    3392             :          *
    3393             :          * To satisfy the rule that any xid potentially appearing in a buffer
    3394             :          * written out to disk must first be logged in WAL, we unfortunately
    3395             :          * have to WAL-log this temporary modification.  We can reuse
    3396             :          * xl_heap_lock for this purpose.  If we crash/error before following
    3397             :          * through with the actual update, xmax will be of an aborted
    3398             :          * transaction, allowing other sessions to proceed.
    3399             :          */
    3400             : 
    3401             :         /*
    3402             :          * Compute xmax / infomask appropriate for locking the tuple. This has
    3403             :          * to be done separately from the combo that's going to be used for
    3404             :          * updating, because the potentially created multixact would otherwise
    3405             :          * be wrong.
    3406             :          */
    3407      356922 :         compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
    3408      118974 :                                   oldtup.t_data->t_infomask,
    3409      118974 :                                   oldtup.t_data->t_infomask2,
    3410             :                                   xid, *lockmode, false,
    3411             :                                   &xmax_lock_old_tuple, &infomask_lock_old_tuple,
    3412             :                                   &infomask2_lock_old_tuple);
    3413             : 
    3414             :         Assert(HEAP_XMAX_IS_LOCKED_ONLY(infomask_lock_old_tuple));
    3415             : 
    3416      118974 :         START_CRIT_SECTION();
    3417             : 
    3418             :         /* Clear obsolete visibility flags ... */
    3419      118974 :         oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    3420      118974 :         oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    3421      118974 :         HeapTupleClearHotUpdated(&oldtup);
    3422             :         /* ... and store info about transaction updating this tuple */
    3423             :         Assert(TransactionIdIsValid(xmax_lock_old_tuple));
    3424      118974 :         HeapTupleHeaderSetXmax(oldtup.t_data, xmax_lock_old_tuple);
    3425      118974 :         oldtup.t_data->t_infomask |= infomask_lock_old_tuple;
    3426      118974 :         oldtup.t_data->t_infomask2 |= infomask2_lock_old_tuple;
    3427      118974 :         HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
    3428             : 
    3429             :         /* temporarily make it look not-updated, but locked */
    3430      118974 :         oldtup.t_data->t_ctid = oldtup.t_self;
    3431             : 
    3432             :         /*
    3433             :          * Clear all-frozen bit on visibility map if needed. We could
    3434             :          * immediately reset ALL_VISIBLE, but given that the WAL logging
    3435             :          * overhead would be unchanged, that doesn't seem particularly
    3436             :          * worthwhile.
    3437             :          */
    3438      119984 :         if (PageIsAllVisible(BufferGetPage(buffer)) &&
    3439        1010 :             visibilitymap_clear(relation, block, vmbuffer,
    3440             :                                 VISIBILITYMAP_ALL_FROZEN))
    3441         720 :             cleared_all_frozen = true;
    3442             : 
    3443      118974 :         MarkBufferDirty(buffer);
    3444             : 
    3445      118974 :         if (RelationNeedsWAL(relation))
    3446             :         {
    3447             :             xl_heap_lock xlrec;
    3448             :             XLogRecPtr  recptr;
    3449             : 
    3450      118738 :             XLogBeginInsert();
    3451      118738 :             XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    3452             : 
    3453      118738 :             xlrec.offnum = ItemPointerGetOffsetNumber(&oldtup.t_self);
    3454      118738 :             xlrec.locking_xid = xmax_lock_old_tuple;
    3455      237476 :             xlrec.infobits_set = compute_infobits(oldtup.t_data->t_infomask,
    3456      118738 :                                                   oldtup.t_data->t_infomask2);
    3457      118738 :             xlrec.flags =
    3458      118738 :                 cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
    3459      118738 :             XLogRegisterData((char *) &xlrec, SizeOfHeapLock);
    3460      118738 :             recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
    3461      118738 :             PageSetLSN(page, recptr);
    3462             :         }
    3463             : 
    3464      118974 :         END_CRIT_SECTION();
    3465             : 
    3466      118974 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3467             : 
    3468             :         /*
    3469             :          * Let the toaster do its thing, if needed.
    3470             :          *
    3471             :          * Note: below this point, heaptup is the data we actually intend to
    3472             :          * store into the relation; newtup is the caller's original untoasted
    3473             :          * data.
    3474             :          */
    3475      118974 :         if (need_toast)
    3476             :         {
    3477             :             /* Note we always use WAL and FSM during updates */
    3478        1450 :             heaptup = heap_toast_insert_or_update(relation, newtup, &oldtup, 0);
    3479        1450 :             newtupsize = MAXALIGN(heaptup->t_len);
    3480             :         }
    3481             :         else
    3482      117524 :             heaptup = newtup;
    3483             : 
    3484             :         /*
    3485             :          * Now, do we need a new page for the tuple, or not?  This is a bit
    3486             :          * tricky since someone else could have added tuples to the page while
    3487             :          * we weren't looking.  We have to recheck the available space after
    3488             :          * reacquiring the buffer lock.  But don't bother to do that if the
    3489             :          * former amount of free space is still not enough; it's unlikely
    3490             :          * there's more free now than before.
    3491             :          *
    3492             :          * What's more, if we need to get a new page, we will need to acquire
    3493             :          * buffer locks on both old and new pages.  To avoid deadlock against
    3494             :          * some other backend trying to get the same two locks in the other
    3495             :          * order, we must be consistent about the order we get the locks in.
    3496             :          * We use the rule "lock the lower-numbered page of the relation
    3497             :          * first".  To implement this, we must do RelationGetBufferForTuple
    3498             :          * while not holding the lock on the old page, and we must rely on it
    3499             :          * to get the locks on both pages in the correct order.
    3500             :          */
    3501      118974 :         if (newtupsize > pagefree)
    3502             :         {
    3503             :             /* Assume there's no chance to put heaptup on same page. */
    3504      118486 :             newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
    3505             :                                                buffer, 0, NULL,
    3506             :                                                &vmbuffer_new, &vmbuffer);
    3507             :         }
    3508             :         else
    3509             :         {
    3510             :             /* Re-acquire the lock on the old tuple's page. */
    3511         488 :             LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    3512             :             /* Re-check using the up-to-date free space */
    3513         488 :             pagefree = PageGetHeapFreeSpace(page);
    3514         488 :             if (newtupsize > pagefree)
    3515             :             {
    3516             :                 /*
    3517             :                  * Rats, it doesn't fit anymore.  We must now unlock and
    3518             :                  * relock to avoid deadlock.  Fortunately, this path should
    3519             :                  * seldom be taken.
    3520             :                  */
    3521           0 :                 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3522           0 :                 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
    3523             :                                                    buffer, 0, NULL,
    3524             :                                                    &vmbuffer_new, &vmbuffer);
    3525             :             }
    3526             :             else
    3527             :             {
    3528             :                 /* OK, it fits here, so we're done. */
    3529         488 :                 newbuf = buffer;
    3530             :             }
    3531             :         }
    3532             :     }
    3533             :     else
    3534             :     {
    3535             :         /* No TOAST work needed, and it'll fit on same page */
    3536      219674 :         newbuf = buffer;
    3537      219674 :         heaptup = newtup;
    3538             :     }
    3539             : 
    3540             :     /*
    3541             :      * We're about to do the actual update -- check for conflict first, to
    3542             :      * avoid possibly having to roll back work we've just done.
    3543             :      *
    3544             :      * This is safe without a recheck as long as there is no possibility of
    3545             :      * another process scanning the pages between this check and the update
    3546             :      * being visible to the scan (i.e., exclusive buffer content lock(s) are
    3547             :      * continuously held from this point until the tuple update is visible).
    3548             :      *
    3549             :      * For the new tuple the only check needed is at the relation level, but
    3550             :      * since both tuples are in the same relation and the check for oldtup
    3551             :      * will include checking the relation level, there is no benefit to a
    3552             :      * separate check for the new tuple.
    3553             :      */
    3554      338648 :     CheckForSerializableConflictIn(relation, otid, BufferGetBlockNumber(buffer));
    3555             : 
    3556             :     /*
    3557             :      * At this point newbuf and buffer are both pinned and locked, and newbuf
    3558             :      * has enough space for the new tuple.  If they are the same buffer, only
    3559             :      * one pin is held.
    3560             :      */
    3561             : 
    3562      338624 :     if (newbuf == buffer)
    3563             :     {
    3564             :         /*
    3565             :          * Since the new tuple is going into the same page, we might be able
    3566             :          * to do a HOT update.  Check if any of the index columns have been
    3567             :          * changed. If the page was already full, we may have skipped checking
    3568             :          * for index columns, and also can't do a HOT update.
    3569             :          */
    3570      220138 :         if (hot_attrs_checked && !bms_overlap(modified_attrs, hot_attrs))
    3571      205742 :             use_hot_update = true;
    3572             :     }
    3573             :     else
    3574             :     {
    3575             :         /* Set a hint that the old page could use prune/defrag */
    3576      118486 :         PageSetFull(page);
    3577             :     }
    3578             : 
    3579             :     /*
    3580             :      * Compute replica identity tuple before entering the critical section so
    3581             :      * we don't PANIC upon a memory allocation failure.
    3582             :      * ExtractReplicaIdentity() will return NULL if nothing needs to be
    3583             :      * logged.
    3584             :      */
    3585      338624 :     old_key_tuple = ExtractReplicaIdentity(relation, &oldtup,
    3586      338624 :                                            bms_overlap(modified_attrs, id_attrs),
    3587             :                                            &old_key_copied);
    3588             : 
    3589             :     /* NO EREPORT(ERROR) from here till changes are logged */
    3590      338624 :     START_CRIT_SECTION();
    3591             : 
    3592             :     /*
    3593             :      * If this transaction commits, the old tuple will become DEAD sooner or
    3594             :      * later.  Set flag that this page is a candidate for pruning once our xid
    3595             :      * falls below the OldestXmin horizon.  If the transaction finally aborts,
    3596             :      * the subsequent page pruning will be a no-op and the hint will be
    3597             :      * cleared.
    3598             :      *
    3599             :      * XXX Should we set hint on newbuf as well?  If the transaction aborts,
    3600             :      * there would be a prunable tuple in the newbuf; but for now we choose
    3601             :      * not to optimize for aborts.  Note that heap_xlog_update must be kept in
    3602             :      * sync if this decision changes.
    3603             :      */
    3604      338624 :     PageSetPrunable(page, xid);
    3605             : 
    3606      338624 :     if (use_hot_update)
    3607             :     {
    3608             :         /* Mark the old tuple as HOT-updated */
    3609      205742 :         HeapTupleSetHotUpdated(&oldtup);
    3610             :         /* And mark the new tuple as heap-only */
    3611      205742 :         HeapTupleSetHeapOnly(heaptup);
    3612             :         /* Mark the caller's copy too, in case different from heaptup */
    3613      205742 :         HeapTupleSetHeapOnly(newtup);
    3614             :     }
    3615             :     else
    3616             :     {
    3617             :         /* Make sure tuples are correctly marked as not-HOT */
    3618      132882 :         HeapTupleClearHotUpdated(&oldtup);
    3619      132882 :         HeapTupleClearHeapOnly(heaptup);
    3620      132882 :         HeapTupleClearHeapOnly(newtup);
    3621             :     }
    3622             : 
    3623      338624 :     RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
    3624             : 
    3625             : 
    3626             :     /* Clear obsolete visibility flags, possibly set by ourselves above... */
    3627      338624 :     oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    3628      338624 :     oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    3629             :     /* ... and store info about transaction updating this tuple */
    3630             :     Assert(TransactionIdIsValid(xmax_old_tuple));
    3631      338624 :     HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
    3632      338624 :     oldtup.t_data->t_infomask |= infomask_old_tuple;
    3633      338624 :     oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
    3634      338624 :     HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
    3635             : 
    3636             :     /* record address of new tuple in t_ctid of old one */
    3637      338624 :     oldtup.t_data->t_ctid = heaptup->t_self;
    3638             : 
    3639             :     /* clear PD_ALL_VISIBLE flags, reset all visibilitymap bits */
    3640      338624 :     if (PageIsAllVisible(BufferGetPage(buffer)))
    3641             :     {
    3642        1742 :         all_visible_cleared = true;
    3643        1742 :         PageClearAllVisible(BufferGetPage(buffer));
    3644        1742 :         visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
    3645             :                             vmbuffer, VISIBILITYMAP_VALID_BITS);
    3646             :     }
    3647      338624 :     if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
    3648             :     {
    3649         788 :         all_visible_cleared_new = true;
    3650         788 :         PageClearAllVisible(BufferGetPage(newbuf));
    3651         788 :         visibilitymap_clear(relation, BufferGetBlockNumber(newbuf),
    3652             :                             vmbuffer_new, VISIBILITYMAP_VALID_BITS);
    3653             :     }
    3654             : 
    3655      338624 :     if (newbuf != buffer)
    3656      118486 :         MarkBufferDirty(newbuf);
    3657      338624 :     MarkBufferDirty(buffer);
    3658             : 
    3659             :     /* XLOG stuff */
    3660      338624 :     if (RelationNeedsWAL(relation))
    3661             :     {
    3662             :         XLogRecPtr  recptr;
    3663             : 
    3664             :         /*
    3665             :          * For logical decoding we need combocids to properly decode the
    3666             :          * catalog.
    3667             :          */
    3668      336988 :         if (RelationIsAccessibleInLogicalDecoding(relation))
    3669             :         {
    3670        1870 :             log_heap_new_cid(relation, &oldtup);
    3671        1870 :             log_heap_new_cid(relation, heaptup);
    3672             :         }
    3673             : 
    3674      336988 :         recptr = log_heap_update(relation, buffer,
    3675             :                                  newbuf, &oldtup, heaptup,
    3676             :                                  old_key_tuple,
    3677             :                                  all_visible_cleared,
    3678             :                                  all_visible_cleared_new);
    3679      336988 :         if (newbuf != buffer)
    3680             :         {
    3681      118250 :             PageSetLSN(BufferGetPage(newbuf), recptr);
    3682             :         }
    3683      336988 :         PageSetLSN(BufferGetPage(buffer), recptr);
    3684             :     }
    3685             : 
    3686      338624 :     END_CRIT_SECTION();
    3687             : 
    3688      338624 :     if (newbuf != buffer)
    3689      118486 :         LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
    3690      338624 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    3691             : 
    3692             :     /*
    3693             :      * Mark old tuple for invalidation from system caches at next command
    3694             :      * boundary, and mark the new tuple for invalidation in case we abort. We
    3695             :      * have to do this before releasing the buffer because oldtup is in the
    3696             :      * buffer.  (heaptup is all in local memory, but it's necessary to process
    3697             :      * both tuple versions in one call to inval.c so we can avoid redundant
    3698             :      * sinval messages.)
    3699             :      */
    3700      338624 :     CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
    3701             : 
    3702             :     /* Now we can release the buffer(s) */
    3703      338624 :     if (newbuf != buffer)
    3704      118486 :         ReleaseBuffer(newbuf);
    3705      338624 :     ReleaseBuffer(buffer);
    3706      338624 :     if (BufferIsValid(vmbuffer_new))
    3707         788 :         ReleaseBuffer(vmbuffer_new);
    3708      338624 :     if (BufferIsValid(vmbuffer))
    3709        1742 :         ReleaseBuffer(vmbuffer);
    3710             : 
    3711             :     /*
    3712             :      * Release the lmgr tuple lock, if we had it.
    3713             :      */
    3714      338624 :     if (have_tuple_lock)
    3715          22 :         UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
    3716             : 
    3717      338624 :     pgstat_count_heap_update(relation, use_hot_update);
    3718             : 
    3719             :     /*
    3720             :      * If heaptup is a private copy, release it.  Don't forget to copy t_self
    3721             :      * back to the caller's image, too.
    3722             :      */
    3723      338624 :     if (heaptup != newtup)
    3724             :     {
    3725        1290 :         newtup->t_self = heaptup->t_self;
    3726        1290 :         heap_freetuple(heaptup);
    3727             :     }
    3728             : 
    3729      338624 :     if (old_key_tuple != NULL && old_key_copied)
    3730         236 :         heap_freetuple(old_key_tuple);
    3731             : 
    3732      338624 :     bms_free(hot_attrs);
    3733      338624 :     bms_free(key_attrs);
    3734      338624 :     bms_free(id_attrs);
    3735      338624 :     bms_free(modified_attrs);
    3736      338624 :     bms_free(interesting_attrs);
    3737             : 
    3738      338624 :     return TM_Ok;
    3739             : }
    3740             : 
    3741             : /*
    3742             :  * Check if the specified attribute's value is same in both given tuples.
    3743             :  * Subroutine for HeapDetermineModifiedColumns.
    3744             :  */
    3745             : static bool
    3746     1065520 : heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
    3747             :                        HeapTuple tup1, HeapTuple tup2)
    3748             : {
    3749             :     Datum       value1,
    3750             :                 value2;
    3751             :     bool        isnull1,
    3752             :                 isnull2;
    3753             :     Form_pg_attribute att;
    3754             : 
    3755             :     /*
    3756             :      * If it's a whole-tuple reference, say "not equal".  It's not really
    3757             :      * worth supporting this case, since it could only succeed after a no-op
    3758             :      * update, which is hardly a case worth optimizing for.
    3759             :      */
    3760     1065520 :     if (attrnum == 0)
    3761           0 :         return false;
    3762             : 
    3763             :     /*
    3764             :      * Likewise, automatically say "not equal" for any system attribute other
    3765             :      * than tableOID; we cannot expect these to be consistent in a HOT chain,
    3766             :      * or even to be set correctly yet in the new tuple.
    3767             :      */
    3768     1065520 :     if (attrnum < 0)
    3769             :     {
    3770           0 :         if (attrnum != TableOidAttributeNumber)
    3771           0 :             return false;
    3772             :     }
    3773             : 
    3774             :     /*
    3775             :      * Extract the corresponding values.  XXX this is pretty inefficient if
    3776             :      * there are many indexed columns.  Should HeapDetermineModifiedColumns do
    3777             :      * a single heap_deform_tuple call on each tuple, instead?  But that
    3778             :      * doesn't work for system columns ...
    3779             :      */
    3780     1065520 :     value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
    3781     1065520 :     value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);
    3782             : 
    3783             :     /*
    3784             :      * If one value is NULL and the other is not, then they are certainly
    3785             :      * not equal.
    3786             :      */
    3787     1065520 :     if (isnull1 != isnull2)
    3788           4 :         return false;
    3789             : 
    3790             :     /*
    3791             :      * If both are NULL, they can be considered equal.
    3792             :      */
    3793     1065516 :     if (isnull1)
    3794        2632 :         return true;
    3795             : 
    3796             :     /*
    3797             :      * We do simple binary comparison of the two datums.  This may be overly
    3798             :      * strict because there can be multiple binary representations for the
    3799             :      * same logical value.  But we should be OK as long as there are no false
    3800             :      * positives.  Using a type-specific equality operator is messy because
    3801             :      * there could be multiple notions of equality in different operator
    3802             :      * classes; furthermore, we cannot safely invoke user-defined functions
    3803             :      * while holding exclusive buffer lock.
    3804             :      */
    3805     1062884 :     if (attrnum <= 0)
    3806             :     {
    3807             :         /* The only allowed system columns are OIDs, so do this */
    3808           0 :         return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
    3809             :     }
    3810             :     else
    3811             :     {
    3812             :         Assert(attrnum <= tupdesc->natts);
    3813     1062884 :         att = TupleDescAttr(tupdesc, attrnum - 1);
    3814     1062884 :         return datumIsEqual(value1, value2, att->attbyval, att->attlen);
    3815             :     }
    3816             : }
    3817             : 
    3818             : /*
    3819             :  * Check which columns are being updated.
    3820             :  *
    3821             :  * Given the old and new versions of a tuple, determine which of the columns
    3822             :  * listed as interesting actually changed, and return them as a bitmapset.
    3823             :  *
    3824             :  * The input bitmapset is destructively modified; that is OK since this is
    3825             :  * invoked at most once in heap_update.
    3826             :  */
    3827             : static Bitmapset *
    3828      338816 : HeapDetermineModifiedColumns(Relation relation, Bitmapset *interesting_cols,
    3829             :                              HeapTuple oldtup, HeapTuple newtup)
    3830             : {
    3831             :     int         attnum;
    3832      338816 :     Bitmapset  *modified = NULL;
    3833             : 
    3834     1404336 :     while ((attnum = bms_first_member(interesting_cols)) >= 0)
    3835             :     {
    3836     1065520 :         attnum += FirstLowInvalidHeapAttributeNumber;
    3837             : 
    3838     1065520 :         if (!heap_tuple_attr_equals(RelationGetDescr(relation),
    3839             :                                     attnum, oldtup, newtup))
    3840       14932 :             modified = bms_add_member(modified,
    3841             :                                       attnum - FirstLowInvalidHeapAttributeNumber);
    3842             :     }
    3843             : 
    3844      338816 :     return modified;
    3845             : }
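
Because a bitmapset cannot hold negative members, both the input and the output sets store attribute numbers offset by FirstLowInvalidHeapAttributeNumber, and the loop above converts back and forth between the two forms. A hedged sketch of how a caller checks the result for one particular column; the helper name column_was_modified and the attribute number 3 are illustrative only:

    #include "postgres.h"
    #include "access/attnum.h"
    #include "access/sysattr.h"     /* FirstLowInvalidHeapAttributeNumber */
    #include "nodes/bitmapset.h"

    static bool
    column_was_modified(Bitmapset *modified_attrs, AttrNumber attnum)
    {
        /* shift the (possibly negative, for system columns) attribute number
         * into the non-negative range used inside the bitmapset */
        return bms_is_member(attnum - FirstLowInvalidHeapAttributeNumber,
                             modified_attrs);
    }

    /* e.g.  column_was_modified(modified_attrs, 3)  on the returned set */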
    3846             : 
    3847             : /*
    3848             :  *  simple_heap_update - replace a tuple
    3849             :  *
    3850             :  * This routine may be used to update a tuple when concurrent updates of
    3851             :  * the target tuple are not expected (for example, because we have a lock
    3852             :  * on the relation associated with the tuple).  Any failure is reported
    3853             :  * via ereport().
    3854             :  */
    3855             : void
    3856      206590 : simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
    3857             : {
    3858             :     TM_Result   result;
    3859             :     TM_FailureData tmfd;
    3860             :     LockTupleMode lockmode;
    3861             : 
    3862      206590 :     result = heap_update(relation, otid, tup,
    3863             :                          GetCurrentCommandId(true), InvalidSnapshot,
    3864             :                          true /* wait for commit */ ,
    3865             :                          &tmfd, &lockmode);
    3866      206590 :     switch (result)
    3867             :     {
    3868           0 :         case TM_SelfModified:
    3869             :             /* Tuple was already updated in current command? */
    3870           0 :             elog(ERROR, "tuple already updated by self");
    3871             :             break;
    3872             : 
    3873      206590 :         case TM_Ok:
    3874             :             /* done successfully */
    3875      206590 :             break;
    3876             : 
    3877           0 :         case TM_Updated:
    3878           0 :             elog(ERROR, "tuple concurrently updated");
    3879             :             break;
    3880             : 
    3881           0 :         case TM_Deleted:
    3882           0 :             elog(ERROR, "tuple concurrently deleted");
    3883             :             break;
    3884             : 
    3885           0 :         default:
    3886           0 :             elog(ERROR, "unrecognized heap_update status: %u", result);
    3887             :             break;
    3888             :     }
    3889      206590 : }
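
Note that, like heap_update itself, this routine changes only the heap; index maintenance is the caller's job. A minimal caller sketch, assuming the relation is already open and locked strongly enough to rule out concurrent updates of the row; the helper name replace_tuple is illustrative (for system catalogs, CatalogTupleUpdate packages this pattern together with index insertion):

    #include "postgres.h"
    #include "access/heapam.h"
    #include "access/htup_details.h"
    #include "utils/rel.h"

    /*
     * Overwrite one row of "rel" with an already-built replacement tuple.
     * "oldtup" must have been fetched from "rel"; its t_self carries the TID
     * of the version being replaced.
     */
    static void
    replace_tuple(Relation rel, HeapTuple oldtup, HeapTuple newtup)
    {
        simple_heap_update(rel, &oldtup->t_self, newtup);

        /*
         * Indexes on "rel" still point only at the old version; the caller
         * must create new index entries for "newtup" separately.
         */
    }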
    3890             : 
    3891             : 
    3892             : /*
    3893             :  * Return the MultiXactStatus corresponding to the given tuple lock mode.
    3894             :  */
    3895             : static MultiXactStatus
    3896        2176 : get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
    3897             : {
    3898             :     int         retval;
    3899             : 
    3900        2176 :     if (is_update)
    3901         150 :         retval = tupleLockExtraInfo[mode].updstatus;
    3902             :     else
    3903        2026 :         retval = tupleLockExtraInfo[mode].lockstatus;
    3904             : 
    3905        2176 :     if (retval == -1)
    3906           0 :         elog(ERROR, "invalid lock tuple mode %d/%s", mode,
    3907             :              is_update ? "true" : "false");
    3908             : 
    3909        2176 :     return (MultiXactStatus) retval;
    3910             : }
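
The translation is driven by the tupleLockExtraInfo table defined earlier in this file. Read the obvious way, the calls below should resolve as the comments indicate; the is_update = true form exists only for the two exclusive modes, and any other combination falls into the elog above (this summary is a hedged reading of the table, not generated output):

    /* lock-only requests */
    get_mxact_status_for_lock(LockTupleKeyShare, false);       /* MultiXactStatusForKeyShare */
    get_mxact_status_for_lock(LockTupleShare, false);          /* MultiXactStatusForShare */
    get_mxact_status_for_lock(LockTupleNoKeyExclusive, false); /* MultiXactStatusForNoKeyUpdate */
    get_mxact_status_for_lock(LockTupleExclusive, false);      /* MultiXactStatusForUpdate */

    /* genuine updates */
    get_mxact_status_for_lock(LockTupleNoKeyExclusive, true);  /* MultiXactStatusNoKeyUpdate */
    get_mxact_status_for_lock(LockTupleExclusive, true);       /* MultiXactStatusUpdate */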
    3911             : 
    3912             : /*
    3913             :  *  heap_lock_tuple - lock a tuple in shared or exclusive mode
    3914             :  *
    3915             :  * Note that this acquires a buffer pin, which the caller must release.
    3916             :  *
    3917             :  * Input parameters:
    3918             :  *  relation: relation containing tuple (caller must hold suitable lock)
    3919             :  *  tid: TID of tuple to lock
    3920             :  *  cid: current command ID (used for visibility test, and stored into
    3921             :  *      tuple's cmax if lock is successful)
    3922             :  *  mode: indicates if shared or exclusive tuple lock is desired
    3923             :  *  wait_policy: what to do if tuple lock is not available
    3924             :  *  follow_updates: if true, follow the update chain to also lock descendant
    3925             :  *      tuples.
    3926             :  *
    3927             :  * Output parameters:
    3928             :  *  *tuple: all fields filled in
    3929             :  *  *buffer: set to buffer holding tuple (pinned but not locked at exit)
    3930             :  *  *tmfd: filled in failure cases (see below)
    3931             :  *
    3932             :  * Function results are the same as the ones for table_tuple_lock().
    3933             :  *
    3934             :  * In the failure cases other than TM_Invisible, the routine fills
    3935             :  * *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
    3936             :  * if necessary), and t_cmax (the last only for TM_SelfModified,
    3937             :  * since we cannot obtain cmax from a combocid generated by another
    3938             :  * transaction).
    3939             :  * See comments for struct TM_FailureData for additional info.
    3940             :  *
    3941             :  * See README.tuplock for a thorough explanation of this mechanism.
    3942             :  */
    3943             : TM_Result
    3944       18616 : heap_lock_tuple(Relation relation, HeapTuple tuple,
    3945             :                 CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
    3946             :                 bool follow_updates,
    3947             :                 Buffer *buffer, TM_FailureData *tmfd)
    3948             : {
    3949             :     TM_Result   result;
    3950       18616 :     ItemPointer tid = &(tuple->t_self);
    3951             :     ItemId      lp;
    3952             :     Page        page;
    3953       18616 :     Buffer      vmbuffer = InvalidBuffer;
    3954             :     BlockNumber block;
    3955             :     TransactionId xid,
    3956             :                 xmax;
    3957             :     uint16      old_infomask,
    3958             :                 new_infomask,
    3959             :                 new_infomask2;
    3960       18616 :     bool        first_time = true;
    3961       18616 :     bool        skip_tuple_lock = false;
    3962       18616 :     bool        have_tuple_lock = false;
    3963       18616 :     bool        cleared_all_frozen = false;
    3964             : 
    3965       18616 :     *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
    3966       18616 :     block = ItemPointerGetBlockNumber(tid);
    3967             : 
    3968             :     /*
    3969             :      * Before locking the buffer, pin the visibility map page if it appears to
    3970             :      * be necessary.  Since we haven't got the lock yet, someone else might be
    3971             :      * in the middle of changing this, so we'll need to recheck after we have
    3972             :      * the lock.
    3973             :      */
    3974       18616 :     if (PageIsAllVisible(BufferGetPage(*buffer)))
    3975        3078 :         visibilitymap_pin(relation, block, &vmbuffer);
    3976             : 
    3977       18616 :     LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    3978             : 
    3979       18616 :     page = BufferGetPage(*buffer);
    3980       18616 :     lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
    3981             :     Assert(ItemIdIsNormal(lp));
    3982             : 
    3983       18616 :     tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
    3984       18616 :     tuple->t_len = ItemIdGetLength(lp);
    3985       18616 :     tuple->t_tableOid = RelationGetRelid(relation);
    3986             : 
    3987       18642 : l3:
    3988       18642 :     result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
    3989             : 
    3990       18642 :     if (result == TM_Invisible)
    3991             :     {
    3992             :         /*
    3993             :          * This is possible, but only when locking a tuple for ON CONFLICT
    3994             :          * UPDATE.  We return this value here rather than throwing an error in
    3995             :          * order to give that case the opportunity to throw a more specific
    3996             :          * error.
    3997             :          */
    3998          16 :         result = TM_Invisible;
    3999          16 :         goto out_locked;
    4000             :     }
    4001       18626 :     else if (result == TM_BeingModified ||
    4002       15160 :              result == TM_Updated ||
    4003             :              result == TM_Deleted)
    4004             :     {
    4005             :         TransactionId xwait;
    4006             :         uint16      infomask;
    4007             :         uint16      infomask2;
    4008             :         bool        require_sleep;
    4009             :         ItemPointerData t_ctid;
    4010             : 
    4011             :         /* must copy state data before unlocking buffer */
    4012        3468 :         xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
    4013        3468 :         infomask = tuple->t_data->t_infomask;
    4014        3468 :         infomask2 = tuple->t_data->t_infomask2;
    4015        3468 :         ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
    4016             : 
    4017        3468 :         LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
    4018             : 
    4019             :         /*
    4020             :          * If any subtransaction of the current top transaction already holds
    4021             :          * a lock as strong as or stronger than what we're requesting, we
    4022             :          * effectively hold the desired lock already.  We *must* succeed
    4023             :          * without trying to take the tuple lock, else we will deadlock
    4024             :          * against anyone wanting to acquire a stronger lock.
    4025             :          *
    4026             :          * Note we only do this the first time we loop on the HTSU result;
    4027             :          * there is no point in testing in subsequent passes, because
    4028             :          * evidently our own transaction cannot have acquired a new lock after
    4029             :          * the first time we checked.
    4030             :          */
    4031        3468 :         if (first_time)
    4032             :         {
    4033        3450 :             first_time = false;
    4034             : 
    4035        3450 :             if (infomask & HEAP_XMAX_IS_MULTI)
    4036             :             {
    4037             :                 int         i;
    4038             :                 int         nmembers;
    4039             :                 MultiXactMember *members;
    4040             : 
    4041             :                 /*
    4042             :                  * We don't need to allow old multixacts here; if that had
    4043             :                  * been the case, HeapTupleSatisfiesUpdate would have returned
    4044             :                  * TM_Ok and we wouldn't be here.
    4045             :                  */
    4046             :                 nmembers =
    4047         136 :                     GetMultiXactIdMembers(xwait, &members, false,
    4048         136 :                                           HEAP_XMAX_IS_LOCKED_ONLY(infomask));
    4049             : 
    4050         422 :                 for (i = 0; i < nmembers; i++)
    4051             :                 {
    4052             :                     /* only consider members of our own transaction */
    4053         296 :                     if (!TransactionIdIsCurrentTransactionId(members[i].xid))
    4054         236 :                         continue;
    4055             : 
    4056          60 :                     if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
    4057             :                     {
    4058          10 :                         pfree(members);
    4059          10 :                         result = TM_Ok;
    4060          10 :                         goto out_unlocked;
    4061             :                     }
    4062             :                     else
    4063             :                     {
    4064             :                         /*
    4065             :                          * Disable acquisition of the heavyweight tuple lock.
    4066             :                          * Otherwise, when promoting a weaker lock, we might
    4067             :                          * deadlock with another locker that has acquired the
    4068             :                          * heavyweight tuple lock and is waiting for our
    4069             :                          * transaction to finish.
    4070             :                          *
    4071             :                          * Note that in this case we still need to wait for
    4072             :                          * the multixact if required, to avoid acquiring
    4073             :                          * conflicting locks.
    4074             :                          */
    4075          50 :                         skip_tuple_lock = true;
    4076             :                     }
    4077             :                 }
    4078             : 
    4079         126 :                 if (members)
    4080         126 :                     pfree(members);
    4081             :             }
    4082        3314 :             else if (TransactionIdIsCurrentTransactionId(xwait))
    4083             :             {
    4084        1142 :                 switch (mode)
    4085             :                 {
    4086         140 :                     case LockTupleKeyShare:
    4087             :                         Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
    4088             :                                HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
    4089             :                                HEAP_XMAX_IS_EXCL_LOCKED(infomask));
    4090         140 :                         result = TM_Ok;
    4091         140 :                         goto out_unlocked;
    4092         228 :                     case LockTupleShare:
    4093         228 :                         if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
    4094           8 :                             HEAP_XMAX_IS_EXCL_LOCKED(infomask))
    4095             :                         {
    4096         220 :                             result = TM_Ok;
    4097         220 :                             goto out_unlocked;
    4098             :                         }
    4099           8 :                         break;
    4100          62 :                     case LockTupleNoKeyExclusive:
    4101          62 :                         if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
    4102             :                         {
    4103          44 :                             result = TM_Ok;
    4104          44 :                             goto out_unlocked;
    4105             :                         }
    4106          18 :                         break;
    4107         712 :                     case LockTupleExclusive:
    4108         712 :                         if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
    4109         694 :                             infomask2 & HEAP_KEYS_UPDATED)
    4110             :                         {
    4111         694 :                             result = TM_Ok;
    4112         694 :                             goto out_unlocked;
    4113             :                         }
    4114          18 :                         break;
    4115             :                 }
    4116        2190 :             }
    4117             :         }
    4118             : 
    4119             :         /*
    4120             :          * Initially assume that we will have to wait for the locking
    4121             :          * transaction(s) to finish.  We check various cases below in which
    4122             :          * this can be turned off.
    4123             :          */
    4124        2360 :         require_sleep = true;
    4125        2360 :         if (mode == LockTupleKeyShare)
    4126             :         {
    4127             :             /*
    4128             :              * If we're requesting KeyShare, and there's no update present, we
    4129             :              * don't need to wait.  Even if there is an update, we can still
    4130             :              * continue if the key hasn't been modified.
    4131             :              *
    4132             :              * However, if there are updates, we need to walk the update chain
    4133             :              * to mark future versions of the row as locked, too.  That way,
    4134             :              * if somebody deletes that future version, we're protected
    4135             :              * against the key going away.  This locking of future versions
    4136             :              * could block momentarily, if a concurrent transaction is
    4137             :              * deleting a key; or it could return a value to the effect that
    4138             :              * the transaction deleting the key has already committed.  So we
    4139             :              * do this before re-locking the buffer; otherwise this would be
    4140             :              * prone to deadlocks.
    4141             :              *
    4142             :              * Note that the TID we're locking was grabbed before we unlocked
    4143             :              * the buffer.  For it to change while we're not looking, the
    4144             :              * other properties we're testing for below after re-locking the
    4145             :              * buffer would also change, in which case we would restart this
    4146             :              * loop above.
    4147             :              */
    4148        1076 :             if (!(infomask2 & HEAP_KEYS_UPDATED))
    4149             :             {
    4150             :                 bool        updated;
    4151             : 
    4152        1016 :                 updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
    4153             : 
    4154             :                 /*
    4155             :                  * If there are updates, follow the update chain; bail out if
    4156             :                  * that cannot be done.
    4157             :                  */
    4158        1016 :                 if (follow_updates && updated)
    4159             :                 {
    4160             :                     TM_Result   res;
    4161             : 
    4162          98 :                     res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
    4163             :                                                   GetCurrentTransactionId(),
    4164             :                                                   mode);
    4165          98 :                     if (res != TM_Ok)
    4166             :                     {
    4167          12 :                         result = res;
    4168             :                         /* recovery code expects to have buffer lock held */
    4169          12 :                         LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4170         242 :                         goto failed;
    4171             :                     }
    4172             :                 }
    4173             : 
    4174        1004 :                 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4175             : 
    4176             :                 /*
    4177             :                  * Make sure it's still an appropriate lock, else start over.
    4178             :                  * Also, if it wasn't updated before we released the lock, but
    4179             :                  * is updated now, we start over too; the reason is that we
    4180             :                  * now need to follow the update chain to lock the new
    4181             :                  * versions.
    4182             :                  */
    4183        1004 :                 if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
    4184          84 :                     ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
    4185          84 :                      !updated))
    4186          26 :                     goto l3;
    4187             : 
    4188             :                 /* Things look okay, so we can skip sleeping */
    4189        1004 :                 require_sleep = false;
    4190             : 
    4191             :                 /*
    4192             :                  * Note we allow Xmax to change here; other updaters/lockers
    4193             :                  * could have modified it before we grabbed the buffer lock.
    4194             :                  * However, this is not a problem, because with the recheck we
    4195             :                  * just did we ensure that they still don't conflict with the
    4196             :                  * lock we want.
    4197             :                  */
    4198             :             }
    4199             :         }
    4200        1284 :         else if (mode == LockTupleShare)
    4201             :         {
    4202             :             /*
    4203             :              * If we're requesting Share, we can similarly avoid sleeping if
    4204             :              * there's no update and no exclusive lock present.
    4205             :              */
    4206         820 :             if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
    4207         820 :                 !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
    4208             :             {
    4209         808 :                 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4210             : 
    4211             :                 /*
    4212             :                  * Make sure it's still an appropriate lock, else start over.
    4213             :                  * See above about allowing xmax to change.
    4214             :                  */
    4215         808 :                 if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
    4216         808 :                     HEAP_XMAX_IS_EXCL_LOCKED(tuple->t_data->t_infomask))
    4217           0 :                     goto l3;
    4218         808 :                 require_sleep = false;
    4219             :             }
    4220             :         }
    4221         464 :         else if (mode == LockTupleNoKeyExclusive)
    4222             :         {
    4223             :             /*
    4224             :              * If we're requesting NoKeyExclusive, we might also be able to
    4225             :              * avoid sleeping; just ensure that there no conflicting lock
    4226             :              * avoid sleeping; just ensure that there is no conflicting lock
    4227             :              */
    4228         214 :             if (infomask & HEAP_XMAX_IS_MULTI)
    4229             :             {
    4230          52 :                 if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
    4231             :                                              mode, NULL))
    4232             :                 {
    4233             :                     /*
    4234             :                      * No conflict, but if the xmax changed under us in the
    4235             :                      * meantime, start over.
    4236             :                      */
    4237          26 :                     LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4238          26 :                     if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
    4239          26 :                         !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
    4240             :                                              xwait))
    4241           0 :                         goto l3;
    4242             : 
    4243             :                     /* otherwise, we're good */
    4244          26 :                     require_sleep = false;
    4245             :                 }
    4246             :             }
    4247         162 :             else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
    4248             :             {
    4249          26 :                 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4250             : 
    4251             :                 /* if the xmax changed in the meantime, start over */
    4252          26 :                 if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
    4253          26 :                     !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
    4254             :                                          xwait))
    4255           0 :                     goto l3;
    4256             :                 /* otherwise, we're good */
    4257          26 :                 require_sleep = false;
    4258             :             }
    4259             :         }
    4260             : 
    4261             :         /*
    4262             :          * As a check independent from those above, we can also avoid sleeping
    4263             :          * if the current transaction is the sole locker of the tuple.  Note
    4264             :          * that the strength of the lock already held is irrelevant; this is
    4265             :          * not about recording the lock in Xmax (which will be done regardless
    4266             :          * of this optimization, below).  Also, note that the cases where we
    4267             :          * hold a lock stronger than we are requesting are already handled
    4268             :          * above by not doing anything.
    4269             :          *
    4270             :          * Note we only deal with the non-multixact case here; MultiXactIdWait
    4271             :          * is well equipped to deal with this situation on its own.
    4272             :          */
    4273        2752 :         if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
    4274         404 :             TransactionIdIsCurrentTransactionId(xwait))
    4275             :         {
    4276             :             /* ... but if the xmax changed in the meantime, start over */
    4277          18 :             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4278          18 :             if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
    4279          18 :                 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
    4280             :                                      xwait))
    4281           0 :                 goto l3;
    4282             :             Assert(HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask));
    4283          18 :             require_sleep = false;
    4284             :         }
    4285             : 
    4286             :         /*
    4287             :          * Time to sleep on the other transaction/multixact, if necessary.
    4288             :          *
    4289             :          * If the other transaction is an update/delete that's already
    4290             :          * committed, then sleeping cannot possibly do any good: if we're
    4291             :          * required to sleep, get out to raise an error instead.
    4292             :          *
    4293             :          * By here, we either have already acquired the buffer exclusive lock,
    4294             :          * or we must wait for the locking transaction or multixact; so below
    4295             :          * we ensure that we grab buffer lock after the sleep.
    4296             :          */
    4297        2348 :         if (require_sleep && (result == TM_Updated || result == TM_Deleted))
    4298             :         {
    4299         154 :             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4300         154 :             goto failed;
    4301             :         }
    4302        2194 :         else if (require_sleep)
    4303             :         {
    4304             :             /*
    4305             :              * Acquire tuple lock to establish our priority for the tuple, or
    4306             :              * die trying.  LockTuple will release us when we are next-in-line
    4307             :              * for the tuple.  We must do this even if we are share-locking,
    4308             :              * but not if we already have a weaker lock on the tuple.
    4309             :              *
    4310             :              * If we are forced to "start over" below, we keep the tuple lock;
    4311             :              * this arranges that we stay at the head of the line while
    4312             :              * rechecking tuple state.
    4313             :              */
    4314         312 :             if (!skip_tuple_lock &&
    4315         282 :                 !heap_acquire_tuplock(relation, tid, mode, wait_policy,
    4316             :                                       &have_tuple_lock))
    4317             :             {
    4318             :                 /*
    4319             :                  * This can only happen if wait_policy is Skip and the lock
    4320             :                  * couldn't be obtained.
    4321             :                  */
    4322           2 :                 result = TM_WouldBlock;
    4323             :                 /* recovery code expects to have buffer lock held */
    4324           2 :                 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4325           2 :                 goto failed;
    4326             :             }
    4327             : 
    4328         308 :             if (infomask & HEAP_XMAX_IS_MULTI)
    4329             :             {
    4330          78 :                 MultiXactStatus status = get_mxact_status_for_lock(mode, false);
    4331             : 
    4332             :                 /* We only ever lock tuples, never update them */
    4333          78 :                 if (status >= MultiXactStatusNoKeyUpdate)
    4334           0 :                     elog(ERROR, "invalid lock mode in heap_lock_tuple");
    4335             : 
    4336             :                 /* wait for multixact to end, or die trying */
    4337          78 :                 switch (wait_policy)
    4338             :                 {
    4339          70 :                     case LockWaitBlock:
    4340          70 :                         MultiXactIdWait((MultiXactId) xwait, status, infomask,
    4341             :                                         relation, &tuple->t_self, XLTW_Lock, NULL);
    4342          70 :                         break;
    4343           4 :                     case LockWaitSkip:
    4344           4 :                         if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
    4345             :                                                         status, infomask, relation,
    4346             :                                                         NULL))
    4347             :                         {
    4348           4 :                             result = TM_WouldBlock;
    4349             :                             /* recovery code expects to have buffer lock held */
    4350           4 :                             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4351           4 :                             goto failed;
    4352             :                         }
    4353           0 :                         break;
    4354           4 :                     case LockWaitError:
    4355           4 :                         if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
    4356             :                                                         status, infomask, relation,
    4357             :                                                         NULL))
    4358           4 :                             ereport(ERROR,
    4359             :                                     (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
    4360             :                                      errmsg("could not obtain lock on row in relation \"%s\"",
    4361             :                                             RelationGetRelationName(relation))));
    4362             : 
    4363           0 :                         break;
    4364             :                 }
    4365             : 
    4366             :                 /*
    4367             :                  * Of course, the multixact might not be done here: if we're
    4368             :                  * requesting a light lock mode, other transactions with light
    4369             :                  * locks could still be alive, as well as locks owned by our
    4370             :                  * own xact or other subxacts of this backend.  We need to
    4371             :                  * preserve the surviving MultiXact members.  Note that it
    4372             :                  * isn't absolutely necessary in the latter case, but doing so
    4373             :                  * is simpler.
    4374             :                  */
    4375          70 :             }
    4376             :             else
    4377             :             {
    4378             :                 /* wait for regular transaction to end, or die trying */
    4379         230 :                 switch (wait_policy)
    4380             :                 {
    4381         152 :                     case LockWaitBlock:
    4382         152 :                         XactLockTableWait(xwait, relation, &tuple->t_self,
    4383             :                                           XLTW_Lock);
    4384         152 :                         break;
    4385          66 :                     case LockWaitSkip:
    4386          66 :                         if (!ConditionalXactLockTableWait(xwait))
    4387             :                         {
    4388          66 :                             result = TM_WouldBlock;
    4389             :                             /* recovery code expects to have buffer lock held */
    4390          66 :                             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4391          66 :                             goto failed;
    4392             :                         }
    4393           0 :                         break;
    4394          12 :                     case LockWaitError:
    4395          12 :                         if (!ConditionalXactLockTableWait(xwait))
    4396          12 :                             ereport(ERROR,
    4397             :                                     (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
    4398             :                                      errmsg("could not obtain lock on row in relation \"%s\"",
    4399             :                                             RelationGetRelationName(relation))));
    4400           0 :                         break;
    4401             :                 }
    4402         222 :             }
    4403             : 
    4404             :             /* if there are updates, follow the update chain */
    4405         222 :             if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
    4406             :             {
    4407             :                 TM_Result   res;
    4408             : 
    4409          72 :                 res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
    4410             :                                               GetCurrentTransactionId(),
    4411             :                                               mode);
    4412          72 :                 if (res != TM_Ok)
    4413             :                 {
    4414           4 :                     result = res;
    4415             :                     /* recovery code expects to have buffer lock held */
    4416           4 :                     LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4417           4 :                     goto failed;
    4418             :                 }
    4419             :             }
    4420             : 
    4421         218 :             LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4422             : 
    4423             :             /*
    4424             :              * xwait is done, but if xwait had just locked the tuple then some
    4425             :              * other xact could update this tuple before we get to this point.
    4426             :              * Check for xmax change, and start over if so.
    4427             :              */
    4428         218 :             if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
    4429         196 :                 !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
    4430             :                                      xwait))
    4431          26 :                 goto l3;
    4432             : 
    4433         192 :             if (!(infomask & HEAP_XMAX_IS_MULTI))
    4434             :             {
    4435             :                 /*
    4436             :                  * Otherwise check if it committed or aborted.  Note we cannot
    4437             :                  * be here if the tuple was only locked by somebody who didn't
    4438             :                  * conflict with us; that would have been handled above.  So
    4439             :                  * that transaction must necessarily be gone by now.  But
    4440             :                  * don't check for this in the multixact case, because some
    4441             :                  * locker transactions might still be running.
    4442             :                  */
    4443         130 :                 UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
    4444             :             }
    4445             :         }
    4446             : 
    4447             :         /* By here, we're certain that we hold buffer exclusive lock again */
    4448             : 
    4449             :         /*
    4450             :          * We may lock if previous xmax aborted, or if it committed but only
    4451             :          * locked the tuple without updating it; or if we didn't have to wait
    4452             :          * at all for whatever reason.
    4453             :          */
    4454        2074 :         if (!require_sleep ||
    4455         192 :             (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
    4456         232 :             HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
    4457         100 :             HeapTupleHeaderIsOnlyLocked(tuple->t_data))
    4458        1984 :             result = TM_Ok;
    4459          90 :         else if (!ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid) ||
    4460          16 :                  HeapTupleHeaderIndicatesMovedPartitions(tuple->t_data))
    4461          74 :             result = TM_Updated;
    4462             :         else
    4463          16 :             result = TM_Deleted;
    4464             :     }
    4465             : 
    4466       15158 : failed:
    4467       17474 :     if (result != TM_Ok)
    4468             :     {
    4469             :         Assert(result == TM_SelfModified || result == TM_Updated ||
    4470             :                result == TM_Deleted || result == TM_WouldBlock);
    4471             :         Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
    4472             :         Assert(result != TM_Updated ||
    4473             :                !ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid));
    4474         340 :         tmfd->ctid = tuple->t_data->t_ctid;
    4475         340 :         tmfd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
    4476         340 :         if (result == TM_SelfModified)
    4477           8 :             tmfd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
    4478             :         else
    4479         332 :             tmfd->cmax = InvalidCommandId;
    4480         340 :         goto out_locked;
    4481             :     }
    4482             : 
    4483             :     /*
    4484             :      * If we didn't pin the visibility map page and the page has become all
    4485             :      * visible while we were busy locking the buffer, or during some
    4486             :      * subsequent window during which we had it unlocked, we'll have to unlock
    4487             :      * and re-lock, to avoid holding the buffer lock across I/O.  That's a bit
    4488             :      * unfortunate, especially since we'll now have to recheck whether the
    4489             :      * tuple has been locked or updated under us, but hopefully it won't
    4490             :      * happen very often.
    4491             :      */
    4492       17134 :     if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
    4493             :     {
    4494           0 :         LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
    4495           0 :         visibilitymap_pin(relation, block, &vmbuffer);
    4496           0 :         LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
    4497           0 :         goto l3;
    4498             :     }
    4499             : 
    4500       17134 :     xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
    4501       17134 :     old_infomask = tuple->t_data->t_infomask;
    4502             : 
    4503             :     /*
    4504             :      * If this is the first possibly-multixact-able operation in the current
    4505             :      * transaction, set my per-backend OldestMemberMXactId setting. We can be
    4506             :      * certain that the transaction will never become a member of any older
    4507             :      * MultiXactIds than that.  (We have to do this even if we end up just
    4508             :      * using our own TransactionId below, since some other backend could
    4509             :      * incorporate our XID into a MultiXact immediately afterwards.)
    4510             :      */
    4511       17134 :     MultiXactIdSetOldestMember();
    4512             : 
    4513             :     /*
    4514             :      * Compute the new xmax and infomask to store into the tuple.  Note we do
    4515             :      * not modify the tuple just yet, because that would leave it in the wrong
    4516             :      * state if multixact.c elogs.
    4517             :      */
    4518       17134 :     compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
    4519             :                               GetCurrentTransactionId(), mode, false,
    4520             :                               &xid, &new_infomask, &new_infomask2);
    4521             : 
    4522       17134 :     START_CRIT_SECTION();
    4523             : 
    4524             :     /*
    4525             :      * Store transaction information of xact locking the tuple.
    4526             :      *
    4527             :      * Note: Cmax is meaningless in this context, so don't set it; this avoids
    4528             :      * possibly generating a useless combo CID.  Moreover, if we're locking a
    4529             :      * previously updated tuple, it's important to preserve the Cmax.
    4530             :      *
    4531             :      * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
    4532             :      * we would break the HOT chain.
    4533             :      */
    4534       17134 :     tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
    4535       17134 :     tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    4536       17134 :     tuple->t_data->t_infomask |= new_infomask;
    4537       17134 :     tuple->t_data->t_infomask2 |= new_infomask2;
    4538       17134 :     if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
    4539       17058 :         HeapTupleHeaderClearHotUpdated(tuple->t_data);
    4540       17134 :     HeapTupleHeaderSetXmax(tuple->t_data, xid);
    4541             : 
    4542             :     /*
    4543             :      * Make sure there is no forward chain link in t_ctid.  Note that in the
    4544             :      * cases where the tuple has been updated, we must not overwrite t_ctid,
    4545             :      * because it was set by the updater.  Moreover, if the tuple has been
    4546             :      * updated, we need to follow the update chain to lock the new versions of
    4547             :      * the tuple as well.
    4548             :      */
    4549       17134 :     if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
    4550       17058 :         tuple->t_data->t_ctid = *tid;
    4551             : 
    4552             :     /* Clear only the all-frozen bit on visibility map if needed */
    4553       20212 :     if (PageIsAllVisible(page) &&
    4554        3078 :         visibilitymap_clear(relation, block, vmbuffer,
    4555             :                             VISIBILITYMAP_ALL_FROZEN))
    4556          28 :         cleared_all_frozen = true;
    4557             : 
    4558             : 
    4559       17134 :     MarkBufferDirty(*buffer);
    4560             : 
    4561             :     /*
    4562             :      * XLOG stuff.  You might think that we don't need an XLOG record because
    4563             :      * there is no state change worth restoring after a crash.  You would be
    4564             :      * wrong however: we have just written either a TransactionId or a
    4565             :      * MultiXactId that may never have been seen on disk before, and we need
    4566             :      * to make sure that there are XLOG entries covering those ID numbers.
    4567             :      * Else the same IDs might be re-used after a crash, which would be
    4568             :      * disastrous if this page made it to disk before the crash.  Essentially
    4569             :      * we have to enforce the WAL log-before-data rule even in this case.
    4570             :      * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
    4571             :      * entries for everything anyway.)
    4572             :      */
    4573       17134 :     if (RelationNeedsWAL(relation))
    4574             :     {
    4575             :         xl_heap_lock xlrec;
    4576             :         XLogRecPtr  recptr;
    4577             : 
    4578       16174 :         XLogBeginInsert();
    4579       16174 :         XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD);
    4580             : 
    4581       16174 :         xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
    4582       16174 :         xlrec.locking_xid = xid;
    4583       32348 :         xlrec.infobits_set = compute_infobits(new_infomask,
    4584       16174 :                                               tuple->t_data->t_infomask2);
    4585       16174 :         xlrec.flags = cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
    4586       16174 :         XLogRegisterData((char *) &xlrec, SizeOfHeapLock);
    4587             : 
    4588             :         /* we don't decode row locks atm, so no need to log the origin */
    4589             : 
    4590       16174 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
    4591             : 
    4592       16174 :         PageSetLSN(page, recptr);
    4593             :     }
    4594             : 
    4595       17134 :     END_CRIT_SECTION();
    4596             : 
    4597       17134 :     result = TM_Ok;
    4598             : 
    4599       17490 : out_locked:
    4600       17490 :     LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
    4601             : 
    4602       18598 : out_unlocked:
    4603       18598 :     if (BufferIsValid(vmbuffer))
    4604        3078 :         ReleaseBuffer(vmbuffer);
    4605             : 
    4606             :     /*
    4607             :      * Don't update the visibility map here. Locking a tuple doesn't change
    4608             :      * visibility info.
    4609             :      */
    4610             : 
    4611             :     /*
    4612             :      * Now that we have successfully marked the tuple as locked, we can
    4613             :      * release the lmgr tuple lock, if we had it.
    4614             :      */
    4615       18598 :     if (have_tuple_lock)
    4616         252 :         UnlockTupleTuplock(relation, tid, mode);
    4617             : 
    4618       18598 :     return result;
    4619             : }
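
A hedged caller sketch, modeled loosely on how the table AM layer drives this routine: the TID goes in through tuple->t_self, the buffer comes back pinned but not locked and must be released, and tmfd describes the conflict on failure. The helper name lock_one_row and the choice of LockTupleExclusive / LockWaitBlock are illustrative only:

    #include "postgres.h"
    #include "access/heapam.h"
    #include "access/tableam.h"
    #include "access/xact.h"
    #include "storage/bufmgr.h"

    static TM_Result
    lock_one_row(Relation rel, ItemPointer tid, HeapTuple tuple)
    {
        TM_FailureData tmfd;
        Buffer      buffer;
        TM_Result   result;

        tuple->t_self = *tid;   /* heap_lock_tuple takes the TID from here */
        result = heap_lock_tuple(rel, tuple,
                                 GetCurrentCommandId(true),
                                 LockTupleExclusive, LockWaitBlock,
                                 false,     /* don't follow the update chain */
                                 &buffer, &tmfd);

        /* copy anything needed out of tuple->t_data before dropping the pin */
        ReleaseBuffer(buffer);

        if (result == TM_Updated || result == TM_Deleted)
        {
            /* tmfd.ctid and tmfd.xmax identify the conflicting update */
        }
        return result;
    }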
    4620             : 
    4621             : /*
    4622             :  * Acquire heavyweight lock on the given tuple, in preparation for acquiring
    4623             :  * its normal, Xmax-based tuple lock.
    4624             :  *
    4625             :  * have_tuple_lock is an input and output parameter: on input, it indicates
    4626             :  * whether the lock has previously been acquired (and this function does
    4627             :  * nothing in that case).  If this function returns success, have_tuple_lock
    4628             :  * has been flipped to true.
    4629             :  *
    4630             :  * Returns false if it was unable to obtain the lock; this can only happen if
    4631             :  * wait_policy is Skip.
    4632             :  */
    4633             : static bool
    4634         426 : heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
    4635             :                      LockWaitPolicy wait_policy, bool *have_tuple_lock)
    4636             : {
    4637         426 :     if (*have_tuple_lock)
    4638          18 :         return true;
    4639             : 
    4640         408 :     switch (wait_policy)
    4641             :     {
    4642         326 :         case LockWaitBlock:
    4643         326 :             LockTupleTuplock(relation, tid, mode);
    4644         326 :             break;
    4645             : 
    4646          68 :         case LockWaitSkip:
    4647          68 :             if (!ConditionalLockTupleTuplock(relation, tid, mode))
    4648           2 :                 return false;
    4649          66 :             break;
    4650             : 
    4651          14 :         case LockWaitError:
    4652          14 :             if (!ConditionalLockTupleTuplock(relation, tid, mode))
    4653           2 :                 ereport(ERROR,
    4654             :                         (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
    4655             :                          errmsg("could not obtain lock on row in relation \"%s\"",
    4656             :                                 RelationGetRelationName(relation))));
    4657          12 :             break;
    4658             :     }
    4659         404 :     *have_tuple_lock = true;
    4660             : 
    4661         404 :     return true;
    4662             : }
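
The in/out flag makes the call idempotent across the "goto l3" style retry loops: only the first call in a given locking attempt takes the heavyweight lock, and it is released exactly once afterwards. A sketch of that calling pattern, written as if it lived inside this file (the function is static here); the helper name lock_tuple_skip_if_busy is illustrative only:

    static TM_Result
    lock_tuple_skip_if_busy(Relation relation, ItemPointer tid)
    {
        bool        have_tuple_lock = false;

        if (!heap_acquire_tuplock(relation, tid, LockTupleExclusive,
                                  LockWaitSkip, &have_tuple_lock))
            return TM_WouldBlock;   /* someone else holds the tuple lock */

        /* ... wait for the conflicting xact, recheck the tuple, set xmax ... */

        /* release exactly once, however many times the recheck looped */
        if (have_tuple_lock)
            UnlockTupleTuplock(relation, tid, LockTupleExclusive);
        return TM_Ok;
    }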
    4663             : 
    4664             : /*
    4665             :  * Given an original set of Xmax and infomask, and a transaction (identified by
    4666             :  * add_to_xmax) acquiring a new lock of some mode, compute the new Xmax and
    4667             :  * corresponding infomasks to use on the tuple.
    4668             :  *
    4669             :  * Note that this might have side effects such as creating a new MultiXactId.
    4670             :  *
    4671             :  * Most callers will have called HeapTupleSatisfiesUpdate before this function;
    4672             :  * that will have set the HEAP_XMAX_INVALID bit if the xmax was a MultiXactId
    4673             :  * but it was not running anymore. There is a race condition, which is that the
    4674             :  * MultiXactId may have finished since then, but that uncommon case is handled
    4675             :  * either here, or within MultiXactIdExpand.
    4676             :  *
    4677             :  * There is a similar race condition possible when the old xmax was a regular
    4678             :  * TransactionId.  We test TransactionIdIsInProgress again just to narrow the
    4679             :  * window, but it's still possible to end up creating an unnecessary
    4680             :  * MultiXactId.  Fortunately this is harmless.
    4681             :  */
    4682             : static void
    4683     2106674 : compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
    4684             :                           uint16 old_infomask2, TransactionId add_to_xmax,
    4685             :                           LockTupleMode mode, bool is_update,
    4686             :                           TransactionId *result_xmax, uint16 *result_infomask,
    4687             :                           uint16 *result_infomask2)
    4688             : {
    4689             :     TransactionId new_xmax;
    4690             :     uint16      new_infomask,
    4691             :                 new_infomask2;
    4692             : 
    4693             :     Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
    4694             : 
    4695     2106674 : l5:
    4696     2106674 :     new_infomask = 0;
    4697     2106674 :     new_infomask2 = 0;
    4698     2106674 :     if (old_infomask & HEAP_XMAX_INVALID)
    4699             :     {
    4700             :         /*
    4701             :          * No previous locker; we just insert our own TransactionId.
    4702             :          *
    4703             :          * Note that it's critical that this case be the first one checked,
    4704             :          * because there are several blocks below that come back to this one
    4705             :          * to implement certain optimizations; old_infomask might contain
    4706             :          * other dirty bits in those cases, but we don't really care.
    4707             :          */
    4708     2092114 :         if (is_update)
    4709             :         {
    4710     1957774 :             new_xmax = add_to_xmax;
    4711     1957774 :             if (mode == LockTupleExclusive)
    4712     1623768 :                 new_infomask2 |= HEAP_KEYS_UPDATED;
    4713             :         }
    4714             :         else
    4715             :         {
    4716      134340 :             new_infomask |= HEAP_XMAX_LOCK_ONLY;
    4717      134340 :             switch (mode)
    4718             :             {
    4719        3308 :                 case LockTupleKeyShare:
    4720        3308 :                     new_xmax = add_to_xmax;
    4721        3308 :                     new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
    4722        3308 :                     break;
    4723        1316 :                 case LockTupleShare:
    4724        1316 :                     new_xmax = add_to_xmax;
    4725        1316 :                     new_infomask |= HEAP_XMAX_SHR_LOCK;
    4726        1316 :                     break;
    4727      125076 :                 case LockTupleNoKeyExclusive:
    4728      125076 :                     new_xmax = add_to_xmax;
    4729      125076 :                     new_infomask |= HEAP_XMAX_EXCL_LOCK;
    4730      125076 :                     break;
    4731        4640 :                 case LockTupleExclusive:
    4732        4640 :                     new_xmax = add_to_xmax;
    4733        4640 :                     new_infomask |= HEAP_XMAX_EXCL_LOCK;
    4734        4640 :                     new_infomask2 |= HEAP_KEYS_UPDATED;
    4735        4640 :                     break;
    4736           0 :                 default:
    4737           0 :                     new_xmax = InvalidTransactionId;    /* silence compiler */
    4738           0 :                     elog(ERROR, "invalid lock mode");
    4739             :             }
    4740             :         }
    4741             :     }
    4742       14560 :     else if (old_infomask & HEAP_XMAX_IS_MULTI)
    4743             :     {
    4744             :         MultiXactStatus new_status;
    4745             : 
    4746             :         /*
    4747             :          * Currently we don't allow XMAX_COMMITTED to be set for multis, so
    4748             :          * cross-check.
    4749             :          */
    4750             :         Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
    4751             : 
    4752             :         /*
    4753             :          * A multixact together with LOCK_ONLY set but neither lock bit set
    4754             :          * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
    4755             :          * anymore.  This check is critical for databases upgraded by
    4756             :          * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
    4757             :          * that such multis are never passed.
    4758             :          */
    4759         192 :         if (HEAP_LOCKED_UPGRADED(old_infomask))
    4760             :         {
    4761           0 :             old_infomask &= ~HEAP_XMAX_IS_MULTI;
    4762           0 :             old_infomask |= HEAP_XMAX_INVALID;
    4763           0 :             goto l5;
    4764             :         }
    4765             : 
    4766             :         /*
    4767             :          * If the XMAX is already a MultiXactId, then we need to expand it to
    4768             :          * include add_to_xmax; but if all the members were lockers and are
    4769             :          * all gone, we can do away with the IS_MULTI bit and just set
    4770             :          * add_to_xmax as the only locker/updater.  If all lockers are gone
    4771             :          * and we have an updater that aborted, we can also do without a
    4772             :          * multi.
    4773             :          *
    4774             :          * If we didn't do this here, MultiXactIdExpand would pay the cost of
    4775             :          * calling GetMultiXactIdMembers anyway, so this check does not incur
    4776             :          * extra work.
    4777             :          */
    4778         192 :         if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
    4779             :         {
    4780          48 :             if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
    4781          16 :                 !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
    4782             :                                                                 old_infomask)))
    4783             :             {
    4784             :                 /*
    4785             :                  * Reset these bits and restart; otherwise fall through to
    4786             :                  * create a new multi below.
    4787             :                  */
    4788          48 :                 old_infomask &= ~HEAP_XMAX_IS_MULTI;
    4789          48 :                 old_infomask |= HEAP_XMAX_INVALID;
    4790          48 :                 goto l5;
    4791             :             }
    4792             :         }
    4793             : 
    4794         144 :         new_status = get_mxact_status_for_lock(mode, is_update);
    4795             : 
    4796         144 :         new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
    4797             :                                      new_status);
    4798         144 :         GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
    4799             :     }
    4800       14368 :     else if (old_infomask & HEAP_XMAX_COMMITTED)
    4801             :     {
    4802             :         /*
    4803             :          * It's a committed update, so we must preserve it as the updater of
    4804             :          * the tuple.
    4805             :          */
    4806             :         MultiXactStatus status;
    4807             :         MultiXactStatus new_status;
    4808             : 
    4809          26 :         if (old_infomask2 & HEAP_KEYS_UPDATED)
    4810           0 :             status = MultiXactStatusUpdate;
    4811             :         else
    4812          26 :             status = MultiXactStatusNoKeyUpdate;
    4813             : 
    4814          26 :         new_status = get_mxact_status_for_lock(mode, is_update);
    4815             : 
    4816             :         /*
    4817             :          * since it's not running, it's obviously impossible for the old
    4818             :          * updater to be identical to the current one, so we need not check
    4819             :          * for that case as we do in the block above.
    4820             :          */
    4821          26 :         new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
    4822          26 :         GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
    4823             :     }
    4824       14342 :     else if (TransactionIdIsInProgress(xmax))
    4825             :     {
    4826             :         /*
    4827             :          * If the XMAX is a valid, in-progress TransactionId, then we need to
    4828             :          * create a new MultiXactId that includes both the old locker or
    4829             :          * updater and our own TransactionId.
    4830             :          */
    4831             :         MultiXactStatus new_status;
    4832             :         MultiXactStatus old_status;
    4833             :         LockTupleMode old_mode;
    4834             : 
    4835       14326 :         if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
    4836             :         {
    4837       28552 :             if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
    4838        1064 :                 old_status = MultiXactStatusForKeyShare;
    4839       13212 :             else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
    4840         802 :                 old_status = MultiXactStatusForShare;
    4841       12410 :             else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
    4842             :             {
    4843       12410 :                 if (old_infomask2 & HEAP_KEYS_UPDATED)
    4844        1138 :                     old_status = MultiXactStatusForUpdate;
    4845             :                 else
    4846       11272 :                     old_status = MultiXactStatusForNoKeyUpdate;
    4847             :             }
    4848             :             else
    4849             :             {
    4850             :                 /*
    4851             :                  * LOCK_ONLY can be present alone only when a page has been
    4852             :                  * upgraded by pg_upgrade.  But in that case,
    4853             :                  * TransactionIdIsInProgress() should have returned false.  We
    4854             :                  * assume it's no longer locked in this case.
    4855             :                  */
    4856           0 :                 elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
    4857           0 :                 old_infomask |= HEAP_XMAX_INVALID;
    4858           0 :                 old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
    4859           0 :                 goto l5;
    4860             :             }
    4861             :         }
    4862             :         else
    4863             :         {
    4864             :             /* it's an update, but which kind? */
    4865          50 :             if (old_infomask2 & HEAP_KEYS_UPDATED)
    4866           0 :                 old_status = MultiXactStatusUpdate;
    4867             :             else
    4868          50 :                 old_status = MultiXactStatusNoKeyUpdate;
    4869             :         }
    4870             : 
    4871       14326 :         old_mode = TUPLOCK_from_mxstatus(old_status);
    4872             : 
    4873             :         /*
    4874             :          * If the lock to be acquired is for the same TransactionId as the
    4875             :          * existing lock, there's an optimization possible: consider only the
    4876             :          * strongest of both locks as the only one present, and restart.
    4877             :          */
    4878       14326 :         if (xmax == add_to_xmax)
    4879             :         {
    4880             :             /*
    4881             :              * Note that it's not possible for the original tuple to be
    4882             :              * updated: we wouldn't be here because the tuple would have been
    4883             :              * invisible and we wouldn't try to update it.  As a subtlety,
    4884             :              * this code can also run when traversing an update chain to lock
    4885             :              * future versions of a tuple.  But we wouldn't be here either,
    4886             :              * because the add_to_xmax would be different from the original
    4887             :              * updater.
    4888             :              */
    4889             :             Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
    4890             : 
    4891             :             /* acquire the strongest of both */
    4892       12460 :             if (mode < old_mode)
    4893         360 :                 mode = old_mode;
    4894             :             /* mustn't touch is_update */
    4895             : 
    4896       12460 :             old_infomask |= HEAP_XMAX_INVALID;
    4897       12460 :             goto l5;
    4898             :         }
    4899             : 
    4900             :         /* otherwise, just fall back to creating a new multixact */
    4901        1866 :         new_status = get_mxact_status_for_lock(mode, is_update);
    4902        1866 :         new_xmax = MultiXactIdCreate(xmax, old_status,
    4903             :                                      add_to_xmax, new_status);
    4904        1866 :         GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
    4905             :     }
    4906          26 :     else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
    4907          10 :              TransactionIdDidCommit(xmax))
    4908           2 :     {
    4909             :         /*
    4910             :          * It's a committed update, so we must preserve it as the updater of the
    4911             :          * tuple.
    4912             :          */
    4913             :         MultiXactStatus status;
    4914             :         MultiXactStatus new_status;
    4915             : 
    4916           2 :         if (old_infomask2 & HEAP_KEYS_UPDATED)
    4917           0 :             status = MultiXactStatusUpdate;
    4918             :         else
    4919           2 :             status = MultiXactStatusNoKeyUpdate;
    4920             : 
    4921           2 :         new_status = get_mxact_status_for_lock(mode, is_update);
    4922             : 
    4923             :         /*
    4924             :          * since it's not running, it's obviously impossible for the old
    4925             :          * updater to be identical to the current one, so we need not check
    4926             :          * for that case as we do in the block above.
    4927             :          */
    4928           2 :         new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
    4929           2 :         GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
    4930             :     }
    4931             :     else
    4932             :     {
    4933             :         /*
    4934             :          * Can get here iff the locking/updating transaction was running when
    4935             :          * the infomask was extracted from the tuple, but finished before
    4936             :          * TransactionIdIsInProgress got to run.  Deal with it as if there was
    4937             :          * no locker at all in the first place.
    4938             :          */
    4939          14 :         old_infomask |= HEAP_XMAX_INVALID;
    4940          14 :         goto l5;
    4941             :     }
    4942             : 
    4943     2094152 :     *result_infomask = new_infomask;
    4944     2094152 :     *result_infomask2 = new_infomask2;
    4945     2094152 :     *result_xmax = new_xmax;
    4946     2094152 : }
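/*
 * Illustrative sketch, not part of heapam.c: the essence of
 * compute_new_xmax_infomask() above is deciding whether the new xmax can stay
 * a plain TransactionId or must become a MultiXactId.  The helper below (the
 * name new_xmax_needs_multi is hypothetical) mirrors the branch order of that
 * function, ignoring the restart paths taken via the l5 label (same-xid lock
 * upgrades, multis whose members are all gone, and lockers that finished in
 * the meantime).
 */
#include "postgres.h"

#include "access/htup_details.h"
#include "access/transam.h"
#include "storage/procarray.h"

static bool
new_xmax_needs_multi(uint16 old_infomask, TransactionId old_xmax)
{
    if (old_infomask & HEAP_XMAX_INVALID)
        return false;           /* no prior locker: our own xid suffices */
    if (old_infomask & HEAP_XMAX_IS_MULTI)
        return true;            /* expand the existing multixact */
    if (old_infomask & HEAP_XMAX_COMMITTED)
        return true;            /* must preserve the committed updater */
    if (TransactionIdIsInProgress(old_xmax))
        return true;            /* combine the live locker/updater with us */
    if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
        TransactionIdDidCommit(old_xmax))
        return true;            /* committed update, not yet hinted */
    return false;               /* old locker is gone; treat as no locker */
}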
    4947             : 
    4948             : /*
    4949             :  * Subroutine for heap_lock_updated_tuple_rec.
    4950             :  *
    4951             :  * Given a hypothetical multixact status held by the transaction identified
    4952             :  * with the given xid, does the current transaction need to wait, fail, or can
    4953             :  * it continue if it wanted to acquire a lock of the given mode?  "needwait"
    4954             :  * is set to true if waiting is necessary; if it can continue, then TM_Ok is
    4955             :  * returned.  If the lock is already held by the current transaction, return
    4956             :  * TM_SelfModified.  In case of a conflict with another transaction, a
    4957             :  * different HeapTupleSatisfiesUpdate return code is returned.
    4958             :  *
    4959             :  * The held status is said to be hypothetical because it might correspond to a
    4960             :  * lock held by a single Xid, i.e. not a real MultiXactId; we express it this
    4961             :  * way for simplicity of API.
    4962             :  */
    4963             : static TM_Result
    4964          60 : test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
    4965             :                            LockTupleMode mode, HeapTuple tup,
    4966             :                            bool *needwait)
    4967             : {
    4968             :     MultiXactStatus wantedstatus;
    4969             : 
    4970          60 :     *needwait = false;
    4971          60 :     wantedstatus = get_mxact_status_for_lock(mode, false);
    4972             : 
    4973             :     /*
    4974             :      * Note: we *must* check TransactionIdIsInProgress before
    4975             :      * TransactionIdDidAbort/Commit; see comment at top of heapam_visibility.c
    4976             :      * for an explanation.
    4977             :      */
    4978          60 :     if (TransactionIdIsCurrentTransactionId(xid))
    4979             :     {
    4980             :         /*
    4981             :          * The tuple has already been locked by our own transaction.  This is
    4982             :          * very rare but can happen if multiple transactions are trying to
    4983             :          * lock an ancient version of the same tuple.
    4984             :          */
    4985           0 :         return TM_SelfModified;
    4986             :     }
    4987          60 :     else if (TransactionIdIsInProgress(xid))
    4988             :     {
    4989             :         /*
    4990             :          * If the locking transaction is running, what we do depends on
    4991             :          * whether the lock modes conflict: if they do, then we must wait for
    4992             :          * it to finish; otherwise we can fall through to lock this tuple
    4993             :          * version without waiting.
    4994             :          */
    4995          30 :         if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
    4996          30 :                                 LOCKMODE_from_mxstatus(wantedstatus)))
    4997             :         {
    4998          16 :             *needwait = true;
    4999             :         }
    5000             : 
    5001             :         /*
    5002             :          * If we set needwait above, then this value doesn't matter;
    5003             :          * otherwise, this value signals to caller that it's okay to proceed.
    5004             :          */
    5005          30 :         return TM_Ok;
    5006             :     }
    5007          30 :     else if (TransactionIdDidAbort(xid))
    5008           6 :         return TM_Ok;
    5009          24 :     else if (TransactionIdDidCommit(xid))
    5010             :     {
    5011             :         /*
    5012             :          * The other transaction committed.  If it was only a locker, then the
    5013             :          * lock is completely gone now and we can return success; but if it
    5014             :          * was an update, then what we do depends on whether the two lock
    5015             :          * modes conflict.  If they conflict, then we must report error to
    5016             :          * caller. But if they don't, we can fall through to allow the current
    5017             :          * transaction to lock the tuple.
    5018             :          *
    5019             :          * Note: the reason we worry about ISUPDATE here is because as soon as
    5020             :          * a transaction ends, all its locks are gone and meaningless, and
    5021             :          * thus we can ignore them; whereas its updates persist.  In the
    5022             :          * TransactionIdIsInProgress case, above, we don't need to check
    5023             :          * because we know the lock is still "alive" and thus a conflict
    5024             :          * always needs to be checked.
    5025             :          */
    5026          24 :         if (!ISUPDATE_from_mxstatus(status))
    5027           6 :             return TM_Ok;
    5028             : 
    5029          18 :         if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
    5030          18 :                                 LOCKMODE_from_mxstatus(wantedstatus)))
    5031             :         {
    5032             :             /* bummer */
    5033          16 :             if (!ItemPointerEquals(&tup->t_self, &tup->t_data->t_ctid) ||
    5034           4 :                 HeapTupleHeaderIndicatesMovedPartitions(tup->t_data))
    5035          12 :                 return TM_Updated;
    5036             :             else
    5037           4 :                 return TM_Deleted;
    5038             :         }
    5039             : 
    5040           2 :         return TM_Ok;
    5041             :     }
    5042             : 
    5043             :     /* Not in progress, not aborted, not committed -- must have crashed */
    5044           0 :     return TM_Ok;
    5045             : }
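/*
 * Illustrative sketch, not part of heapam.c: at the tuple level, the
 * DoLockModesConflict() test above corresponds to the row-level lock conflict
 * table for FOR KEY SHARE / FOR SHARE / FOR NO KEY UPDATE / FOR UPDATE.  The
 * predicate below (the name tuple_lock_modes_conflict is hypothetical) spells
 * that table out directly.
 */
#include "postgres.h"

#include "nodes/lockoptions.h"

static bool
tuple_lock_modes_conflict(LockTupleMode held, LockTupleMode wanted)
{
    /* rows/columns: KeyShare, Share, NoKeyExclusive, Exclusive */
    static const bool conflicts[4][4] = {
        {false, false, false, true},    /* held = LockTupleKeyShare */
        {false, false, true, true},     /* held = LockTupleShare */
        {false, true, true, true},      /* held = LockTupleNoKeyExclusive */
        {true, true, true, true}        /* held = LockTupleExclusive */
    };

    return conflicts[held][wanted];
}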
    5046             : 
    5047             : 
    5048             : /*
    5049             :  * Recursive part of heap_lock_updated_tuple
    5050             :  *
    5051             :  * Fetch the tuple pointed to by tid in rel, and mark it as locked by the given
    5052             :  * xid with the given mode; if this tuple is updated, recurse to lock the new
    5053             :  * version as well.
    5054             :  */
    5055             : static TM_Result
    5056         154 : heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
    5057             :                             LockTupleMode mode)
    5058             : {
    5059             :     TM_Result   result;
    5060             :     ItemPointerData tupid;
    5061             :     HeapTupleData mytup;
    5062             :     Buffer      buf;
    5063             :     uint16      new_infomask,
    5064             :                 new_infomask2,
    5065             :                 old_infomask,
    5066             :                 old_infomask2;
    5067             :     TransactionId xmax,
    5068             :                 new_xmax;
    5069         154 :     TransactionId priorXmax = InvalidTransactionId;
    5070         154 :     bool        cleared_all_frozen = false;
    5071             :     bool        pinned_desired_page;
    5072         154 :     Buffer      vmbuffer = InvalidBuffer;
    5073             :     BlockNumber block;
    5074             : 
    5075         154 :     ItemPointerCopy(tid, &tupid);
    5076             : 
    5077             :     for (;;)
    5078             :     {
    5079           6 :         new_infomask = 0;
    5080         160 :         new_xmax = InvalidTransactionId;
    5081         160 :         block = ItemPointerGetBlockNumber(&tupid);
    5082         160 :         ItemPointerCopy(&tupid, &(mytup.t_self));
    5083             : 
    5084         160 :         if (!heap_fetch(rel, SnapshotAny, &mytup, &buf))
    5085             :         {
    5086             :             /*
    5087             :              * if we fail to find the updated version of the tuple, it's
    5088             :              * because it was vacuumed/pruned away after its creator
    5089             :              * transaction aborted.  So behave as if we got to the end of the
    5090             :              * chain, and there's no further tuple to lock: return success to
    5091             :              * caller.
    5092             :              */
    5093           0 :             result = TM_Ok;
    5094           0 :             goto out_unlocked;
    5095             :         }
    5096             : 
    5097         160 : l4:
    5098         176 :         CHECK_FOR_INTERRUPTS();
    5099             : 
    5100             :         /*
    5101             :          * Before locking the buffer, pin the visibility map page if it
    5102             :          * appears to be necessary.  Since we haven't got the lock yet,
    5103             :          * someone else might be in the middle of changing this, so we'll need
    5104             :          * to recheck after we have the lock.
    5105             :          */
    5106         176 :         if (PageIsAllVisible(BufferGetPage(buf)))
    5107             :         {
    5108           0 :             visibilitymap_pin(rel, block, &vmbuffer);
    5109           0 :             pinned_desired_page = true;
    5110             :         }
    5111             :         else
    5112         176 :             pinned_desired_page = false;
    5113             : 
    5114         176 :         LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
    5115             : 
    5116             :         /*
    5117             :          * If we didn't pin the visibility map page and the page has become
    5118             :          * all visible while we were busy locking the buffer, we'll have to
    5119             :          * unlock and re-lock, to avoid holding the buffer lock across I/O.
    5120             :          * That's a bit unfortunate, but hopefully shouldn't happen often.
    5121             :          *
    5122             :          * Note: in some paths through this function, we will reach here
    5123             :          * holding a pin on a vm page that may or may not be the one matching
    5124             :          * this page.  If this page isn't all-visible, we won't use the vm
    5125             :          * page, but we hold onto such a pin till the end of the function.
    5126             :          */
    5127         176 :         if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
    5128             :         {
    5129           0 :             LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    5130           0 :             visibilitymap_pin(rel, block, &vmbuffer);
    5131           0 :             LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
    5132             :         }
    5133             : 
    5134             :         /*
    5135             :          * Check the tuple XMIN against prior XMAX, if any.  If we reached the
    5136             :          * end of the chain, we're done, so return success.
    5137             :          */
    5138         182 :         if (TransactionIdIsValid(priorXmax) &&
    5139           6 :             !TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data),
    5140             :                                  priorXmax))
    5141             :         {
    5142           0 :             result = TM_Ok;
    5143           0 :             goto out_locked;
    5144             :         }
    5145             : 
    5146             :         /*
    5147             :          * Also check Xmin: if this tuple was created by an aborted
    5148             :          * (sub)transaction, then we already locked the last live one in the
    5149             :          * chain, thus we're done, so return success.
    5150             :          */
    5151         176 :         if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
    5152             :         {
    5153          24 :             result = TM_Ok;
    5154          24 :             goto out_locked;
    5155             :         }
    5156             : 
    5157         152 :         old_infomask = mytup.t_data->t_infomask;
    5158         152 :         old_infomask2 = mytup.t_data->t_infomask2;
    5159         152 :         xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
    5160             : 
    5161             :         /*
    5162             :          * If this tuple version has been updated or locked by some concurrent
    5163             :          * transaction(s), what we do depends on whether our lock mode
    5164             :          * conflicts with what those other transactions hold, and also on the
    5165             :          * status of them.
    5166             :          */
    5167         152 :         if (!(old_infomask & HEAP_XMAX_INVALID))
    5168             :         {
    5169             :             TransactionId rawxmax;
    5170             :             bool        needwait;
    5171             : 
    5172          56 :             rawxmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
    5173          56 :             if (old_infomask & HEAP_XMAX_IS_MULTI)
    5174             :             {
    5175             :                 int         nmembers;
    5176             :                 int         i;
    5177             :                 MultiXactMember *members;
    5178             : 
    5179             :                 /*
    5180             :                  * We don't need a test for pg_upgrade'd tuples: this is only
    5181             :                  * applied to tuples after the first in an update chain.  Said
    5182             :                  * first tuple in the chain may well be locked-in-9.2-and-
    5183             :                  * pg_upgraded, but that one was already locked by our caller,
    5184             :                  * not us; and any subsequent ones cannot be because our
    5185             :                  * caller must necessarily have obtained a snapshot later than
    5186             :                  * the pg_upgrade itself.
    5187             :                  */
    5188             :                 Assert(!HEAP_LOCKED_UPGRADED(mytup.t_data->t_infomask));
    5189             : 
    5190           2 :                 nmembers = GetMultiXactIdMembers(rawxmax, &members, false,
    5191           2 :                                                  HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
    5192           8 :                 for (i = 0; i < nmembers; i++)
    5193             :                 {
    5194           6 :                     result = test_lockmode_for_conflict(members[i].status,
    5195           6 :                                                         members[i].xid,
    5196             :                                                         mode,
    5197             :                                                         &mytup,
    5198             :                                                         &needwait);
    5199             : 
    5200             :                     /*
    5201             :                      * If the tuple was already locked by ourselves in a
    5202             :                      * previous iteration of this (say heap_lock_tuple was
    5203             :                      * forced to restart the locking loop because of a change
    5204             :                      * in xmax), then we hold the lock already on this tuple
    5205             :                      * version and we don't need to do anything; and this is
    5206             :                      * not an error condition either.  We just need to skip
    5207             :                      * this tuple and continue locking the next version in the
    5208             :                      * update chain.
    5209             :                      */
    5210           6 :                     if (result == TM_SelfModified)
    5211             :                     {
    5212           0 :                         pfree(members);
    5213           0 :                         goto next;
    5214             :                     }
    5215             : 
    5216           6 :                     if (needwait)
    5217             :                     {
    5218           0 :                         LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    5219           0 :                         XactLockTableWait(members[i].xid, rel,
    5220             :                                           &mytup.t_self,
    5221             :                                           XLTW_LockUpdated);
    5222           0 :                         pfree(members);
    5223           0 :                         goto l4;
    5224             :                     }
    5225           6 :                     if (result != TM_Ok)
    5226             :                     {
    5227           0 :                         pfree(members);
    5228           0 :                         goto out_locked;
    5229             :                     }
    5230             :                 }
    5231           2 :                 if (members)
    5232           2 :                     pfree(members);
    5233             :             }
    5234             :             else
    5235             :             {
    5236             :                 MultiXactStatus status;
    5237             : 
    5238             :                 /*
    5239             :                  * For a non-multi Xmax, we first need to compute the
    5240             :                  * corresponding MultiXactStatus by using the infomask bits.
    5241             :                  */
    5242          54 :                 if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
    5243             :                 {
    5244          32 :                     if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
    5245          16 :                         status = MultiXactStatusForKeyShare;
    5246           0 :                     else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
    5247           0 :                         status = MultiXactStatusForShare;
    5248           0 :                     else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
    5249             :                     {
    5250           0 :                         if (old_infomask2 & HEAP_KEYS_UPDATED)
    5251           0 :                             status = MultiXactStatusForUpdate;
    5252             :                         else
    5253           0 :                             status = MultiXactStatusForNoKeyUpdate;
    5254             :                     }
    5255             :                     else
    5256             :                     {
    5257             :                         /*
    5258             :                          * LOCK_ONLY present alone (a pg_upgraded tuple marked
    5259             :                          * as share-locked in the old cluster) shouldn't be
    5260             :                          * seen in the middle of an update chain.
    5261             :                          */
    5262           0 :                         elog(ERROR, "invalid lock status in tuple");
    5263             :                     }
    5264             :                 }
    5265             :                 else
    5266             :                 {
    5267             :                     /* it's an update, but which kind? */
    5268          38 :                     if (old_infomask2 & HEAP_KEYS_UPDATED)
    5269          28 :                         status = MultiXactStatusUpdate;
    5270             :                     else
    5271          10 :                         status = MultiXactStatusNoKeyUpdate;
    5272             :                 }
    5273             : 
    5274          54 :                 result = test_lockmode_for_conflict(status, rawxmax, mode,
    5275             :                                                     &mytup, &needwait);
    5276             : 
    5277             :                 /*
    5278             :                  * If the tuple was already locked by ourselves in a previous
    5279             :                  * iteration of this (say heap_lock_tuple was forced to
    5280             :                  * restart the locking loop because of a change in xmax), then
    5281             :                  * we hold the lock already on this tuple version and we don't
    5282             :                  * need to do anything; and this is not an error condition
    5283             :                  * either.  We just need to skip this tuple and continue
    5284             :                  * locking the next version in the update chain.
    5285             :                  */
    5286          54 :                 if (result == TM_SelfModified)
    5287           0 :                     goto next;
    5288             : 
    5289          54 :                 if (needwait)
    5290             :                 {
    5291          16 :                     LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    5292          16 :                     XactLockTableWait(rawxmax, rel, &mytup.t_self,
    5293             :                                       XLTW_LockUpdated);
    5294          16 :                     goto l4;
    5295             :                 }
    5296          38 :                 if (result != TM_Ok)
    5297             :                 {
    5298          16 :                     goto out_locked;
    5299             :                 }
    5300             :             }
    5301             :         }
    5302             : 
    5303             :         /* compute the new Xmax and infomask values for the tuple ... */
    5304         120 :         compute_new_xmax_infomask(xmax, old_infomask, mytup.t_data->t_infomask2,
    5305             :                                   xid, mode, false,
    5306             :                                   &new_xmax, &new_infomask, &new_infomask2);
    5307             : 
    5308         120 :         if (PageIsAllVisible(BufferGetPage(buf)) &&
    5309           0 :             visibilitymap_clear(rel, block, vmbuffer,
    5310             :                                 VISIBILITYMAP_ALL_FROZEN))
    5311           0 :             cleared_all_frozen = true;
    5312             : 
    5313         120 :         START_CRIT_SECTION();
    5314             : 
    5315             :         /* ... and set them */
    5316         120 :         HeapTupleHeaderSetXmax(mytup.t_data, new_xmax);
    5317         120 :         mytup.t_data->t_infomask &= ~HEAP_XMAX_BITS;
    5318         120 :         mytup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    5319         120 :         mytup.t_data->t_infomask |= new_infomask;
    5320         120 :         mytup.t_data->t_infomask2 |= new_infomask2;
    5321             : 
    5322         120 :         MarkBufferDirty(buf);
    5323             : 
    5324             :         /* XLOG stuff */
    5325         120 :         if (RelationNeedsWAL(rel))
    5326             :         {
    5327             :             xl_heap_lock_updated xlrec;
    5328             :             XLogRecPtr  recptr;
    5329         120 :             Page        page = BufferGetPage(buf);
    5330             : 
    5331         120 :             XLogBeginInsert();
    5332         120 :             XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
    5333             : 
    5334         120 :             xlrec.offnum = ItemPointerGetOffsetNumber(&mytup.t_self);
    5335         120 :             xlrec.xmax = new_xmax;
    5336         120 :             xlrec.infobits_set = compute_infobits(new_infomask, new_infomask2);
    5337         120 :             xlrec.flags =
    5338         120 :                 cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
    5339             : 
    5340         120 :             XLogRegisterData((char *) &xlrec, SizeOfHeapLockUpdated);
    5341             : 
    5342         120 :             recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOCK_UPDATED);
    5343             : 
    5344         120 :             PageSetLSN(page, recptr);
    5345             :         }
    5346             : 
    5347         120 :         END_CRIT_SECTION();
    5348             : 
    5349         120 : next:
    5350             :         /* if we find the end of update chain, we're done. */
    5351         120 :         if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
    5352         240 :             HeapTupleHeaderIndicatesMovedPartitions(mytup.t_data) ||
    5353         128 :             ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
    5354           8 :             HeapTupleHeaderIsOnlyLocked(mytup.t_data))
    5355             :         {
    5356         114 :             result = TM_Ok;
    5357         114 :             goto out_locked;
    5358             :         }
    5359             : 
    5360             :         /* tail recursion */
    5361           6 :         priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
    5362           6 :         ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
    5363           6 :         UnlockReleaseBuffer(buf);
    5364             :     }
    5365             : 
    5366             :     result = TM_Ok;
    5367             : 
    5368         154 : out_locked:
    5369         154 :     UnlockReleaseBuffer(buf);
    5370             : 
    5371         154 : out_unlocked:
    5372         154 :     if (vmbuffer != InvalidBuffer)
    5373           0 :         ReleaseBuffer(vmbuffer);
    5374             : 
    5375         154 :     return result;
    5376             : }
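/*
 * Illustrative sketch, not part of heapam.c: heap_lock_updated_tuple_rec()
 * above walks an update chain by following t_ctid until the chain ends.  The
 * stripped-down walker below (the name walk_update_chain is hypothetical)
 * shows just the traversal logic, with no locking, infomask changes, or WAL.
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/htup_details.h"
#include "storage/bufmgr.h"
#include "storage/itemptr.h"
#include "utils/snapmgr.h"

static int
walk_update_chain(Relation rel, ItemPointer start)
{
    HeapTupleData tup;
    Buffer      buf;
    ItemPointerData tid = *start;
    int         nvisited = 0;

    for (;;)
    {
        tup.t_self = tid;
        if (!heap_fetch(rel, SnapshotAny, &tup, &buf))
            break;              /* this version was pruned away; chain ends */
        nvisited++;

        /* chain ends when the tuple is not updated or points at itself */
        if ((tup.t_data->t_infomask & HEAP_XMAX_INVALID) ||
            ItemPointerEquals(&tup.t_self, &tup.t_data->t_ctid))
        {
            ReleaseBuffer(buf);
            break;
        }

        tid = tup.t_data->t_ctid;   /* copy before releasing the buffer */
        ReleaseBuffer(buf);
    }

    return nvisited;
}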
    5377             : 
    5378             : /*
    5379             :  * heap_lock_updated_tuple
    5380             :  *      Follow update chain when locking an updated tuple, acquiring locks (row
    5381             :  *      marks) on the updated versions.
    5382             :  *
    5383             :  * The initial tuple is assumed to be already locked.
    5384             :  *
    5385             :  * This function doesn't check visibility, it just unconditionally marks the
    5386             :  * tuple(s) as locked.  If any tuple in the updated chain is being deleted
    5387             :  * concurrently (or updated with the key being modified), sleep until the
    5388             :  * transaction doing it is finished.
    5389             :  *
    5390             :  * Note that we don't acquire heavyweight tuple locks on the tuples we walk
    5391             :  * when we have to wait for other transactions to release them, as opposed to
    5392             :  * what heap_lock_tuple does.  The reason is that having more than one
    5393             :  * transaction walking the chain is probably uncommon enough that risk of
    5394             :  * starvation is not likely: one of the preconditions for being here is that
    5395             :  * the snapshot in use predates the update that created this tuple (because we
    5396             :  * started at an earlier version of the tuple), but at the same time such a
    5397             :  * transaction cannot be using repeatable read or serializable isolation
    5398             :  * levels, because that would lead to a serializability failure.
    5399             :  */
    5400             : static TM_Result
    5401         170 : heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
    5402             :                         TransactionId xid, LockTupleMode mode)
    5403             : {
    5404             :     /*
    5405             :      * If the tuple has not been updated, or has moved into another partition
    5406             :      * (effectively a delete) stop here.
    5407             :      */
    5408         170 :     if (!HeapTupleHeaderIndicatesMovedPartitions(tuple->t_data) &&
    5409         166 :         !ItemPointerEquals(&tuple->t_self, ctid))
    5410             :     {
    5411             :         /*
    5412             :          * If this is the first possibly-multixact-able operation in the
    5413             :          * current transaction, set my per-backend OldestMemberMXactId
    5414             :          * setting. We can be certain that the transaction will never become a
    5415             :          * member of any older MultiXactIds than that.  (We have to do this
    5416             :          * even if we end up just using our own TransactionId below, since
    5417             :          * some other backend could incorporate our XID into a MultiXact
    5418             :          * immediately afterwards.)
    5419             :          */
    5420         154 :         MultiXactIdSetOldestMember();
    5421             : 
    5422         154 :         return heap_lock_updated_tuple_rec(rel, ctid, xid, mode);
    5423             :     }
    5424             : 
    5425             :     /* nothing to lock */
    5426          16 :     return TM_Ok;
    5427             : }
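/*
 * Illustrative sketch, not part of heapam.c: heap_lock_updated_tuple() is
 * reached through heap_lock_tuple() when follow_updates is true and the
 * locked tuple has newer versions.  A caller-side sketch, assuming the
 * PostgreSQL 13 heap_lock_tuple() signature; the helper name below is
 * hypothetical.
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/tableam.h"
#include "nodes/lockoptions.h"
#include "storage/bufmgr.h"

static bool
lock_tuple_and_follow_updates(Relation rel, ItemPointer tid, CommandId cid)
{
    HeapTupleData tuple;
    Buffer      buf;
    TM_FailureData tmfd;
    TM_Result   res;

    /* t_self identifies the tuple version to start from */
    tuple.t_self = *tid;
    res = heap_lock_tuple(rel, &tuple, cid, LockTupleNoKeyExclusive,
                          LockWaitBlock, true /* follow_updates */ ,
                          &buf, &tmfd);

    /* heap_lock_tuple returns the buffer pinned (but not locked) */
    if (BufferIsValid(buf))
        ReleaseBuffer(buf);

    return (res == TM_Ok);
}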
    5428             : 
    5429             : /*
    5430             :  *  heap_finish_speculative - mark speculative insertion as successful
    5431             :  *
    5432             :  * To successfully finish a speculative insertion we have to clear the
    5433             :  * speculative token from the tuple.  To do so, the t_ctid field, which
    5434             :  * contains a speculative token value, is modified in place to point to the
    5435             :  * tuple itself, which is characteristic of a newly inserted ordinary tuple.
    5436             :  *
    5437             :  * NB: It is not ok to commit without either finishing or aborting a
    5438             :  * speculative insertion.  We could treat speculative tuples of committed
    5439             :  * transactions implicitly as completed, but then we would have to be prepared
    5440             :  * to deal with speculative tokens on committed tuples.  That wouldn't be
    5441             :  * difficult - no-one looks at the ctid field of a tuple with invalid xmax -
    5442             :  * but clearing the token at completion isn't very expensive either.
    5443             :  * An explicit confirmation WAL record also makes logical decoding simpler.
    5444             :  */
    5445             : void
    5446        3884 : heap_finish_speculative(Relation relation, ItemPointer tid)
    5447             : {
    5448             :     Buffer      buffer;
    5449             :     Page        page;
    5450             :     OffsetNumber offnum;
    5451        3884 :     ItemId      lp = NULL;
    5452             :     HeapTupleHeader htup;
    5453             : 
    5454        3884 :     buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
    5455        3884 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    5456        3884 :     page = (Page) BufferGetPage(buffer);
    5457             : 
    5458        3884 :     offnum = ItemPointerGetOffsetNumber(tid);
    5459        3884 :     if (PageGetMaxOffsetNumber(page) >= offnum)
    5460        3884 :         lp = PageGetItemId(page, offnum);
    5461             : 
    5462        3884 :     if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
    5463           0 :         elog(ERROR, "invalid lp");
    5464             : 
    5465        3884 :     htup = (HeapTupleHeader) PageGetItem(page, lp);
    5466             : 
    5467             :     /* SpecTokenOffsetNumber should be distinguishable from any real offset */
    5468             :     StaticAssertStmt(MaxOffsetNumber < SpecTokenOffsetNumber,
    5469             :                      "invalid speculative token constant");
    5470             : 
    5471             :     /* NO EREPORT(ERROR) from here till changes are logged */
    5472        3884 :     START_CRIT_SECTION();
    5473             : 
    5474             :     Assert(HeapTupleHeaderIsSpeculative(htup));
    5475             : 
    5476        3884 :     MarkBufferDirty(buffer);
    5477             : 
    5478             :     /*
    5479             :      * Replace the speculative insertion token with a real t_ctid, pointing to
    5480             :      * itself like it does on regular tuples.
    5481             :      */
    5482        3884 :     htup->t_ctid = *tid;
    5483             : 
    5484             :     /* XLOG stuff */
    5485        3884 :     if (RelationNeedsWAL(relation))
    5486             :     {
    5487             :         xl_heap_confirm xlrec;
    5488             :         XLogRecPtr  recptr;
    5489             : 
    5490        3876 :         xlrec.offnum = ItemPointerGetOffsetNumber(tid);
    5491             : 
    5492        3876 :         XLogBeginInsert();
    5493             : 
    5494             :         /* We want the same filtering on this as on a plain insert */
    5495        3876 :         XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    5496             : 
    5497        3876 :         XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
    5498        3876 :         XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    5499             : 
    5500        3876 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
    5501             : 
    5502        3876 :         PageSetLSN(page, recptr);
    5503             :     }
    5504             : 
    5505        3884 :     END_CRIT_SECTION();
    5506             : 
    5507        3884 :     UnlockReleaseBuffer(buffer);
    5508        3884 : }
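/*
 * Illustrative sketch, not part of heapam.c: the speculative-insertion
 * protocol as seen by a caller, loosely following what the executor and the
 * heap table AM do for INSERT ... ON CONFLICT.  This is only a simplified
 * sketch (single tuple, index handling elided); the function name and the
 * conflict_detected flag are hypothetical.
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/xact.h"
#include "storage/lmgr.h"

static void
speculative_insert_example(Relation rel, HeapTuple tup, CommandId cid,
                           bool conflict_detected)
{
    TransactionId xid = GetCurrentTransactionId();
    uint32      token = SpeculativeInsertionLockAcquire(xid);

    /* stamp the token into t_ctid and insert the tuple speculatively */
    HeapTupleHeaderSetSpeculativeToken(tup->t_data, token);
    heap_insert(rel, tup, cid, HEAP_INSERT_SPECULATIVE, NULL);

    /* ... insert index entries and check for conflicts here ... */

    if (!conflict_detected)
        heap_finish_speculative(rel, &tup->t_self);     /* confirm (above) */
    else
        heap_abort_speculative(rel, &tup->t_self);      /* kill (below) */

    /* let anyone waiting on the token re-check the tuple's fate */
    SpeculativeInsertionLockRelease(xid);
}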
    5509             : 
    5510             : /*
    5511             :  *  heap_abort_speculative - kill a speculatively inserted tuple
    5512             :  *
    5513             :  * Marks a tuple that was speculatively inserted in the same command as dead
    5514             :  * by setting its xmin to invalid.  That makes it immediately appear as dead
    5515             :  * to all transactions, including our own.  In particular, it makes
    5516             :  * HeapTupleSatisfiesDirty() regard the tuple as dead, so that another backend
    5517             :  * inserting a duplicate key value won't unnecessarily wait for our whole
    5518             :  * transaction to finish (it'll just wait for our speculative insertion to
    5519             :  * finish).
    5520             :  *
    5521             :  * Killing the tuple prevents "unprincipled deadlocks", which are deadlocks
    5522             :  * that arise due to a mutual dependency that is not user visible.  By
    5523             :  * definition, unprincipled deadlocks cannot be prevented by the user
    5524             :  * reordering lock acquisition in client code, because the implementation-level
    5525             :  * lock acquisitions are not under the user's direct control.  If speculative
    5526             :  * inserters did not take this precaution, then under high concurrency they
    5527             :  * could deadlock with each other, which would not be acceptable.
    5528             :  *
    5529             :  * This is somewhat redundant with heap_delete, but we prefer to have a
    5530             :  * dedicated routine with stripped down requirements.  Note that this is also
    5531             :  * used to delete the TOAST tuples created during speculative insertion.
    5532             :  *
    5533             :  * This routine does not affect logical decoding, which only looks at
    5534             :  * confirmation records.
    5535             :  */
    5536             : void
    5537          20 : heap_abort_speculative(Relation relation, ItemPointer tid)
    5538             : {
    5539          20 :     TransactionId xid = GetCurrentTransactionId();
    5540             :     ItemId      lp;
    5541             :     HeapTupleData tp;
    5542             :     Page        page;
    5543             :     BlockNumber block;
    5544             :     Buffer      buffer;
    5545             :     TransactionId prune_xid;
    5546             : 
    5547             :     Assert(ItemPointerIsValid(tid));
    5548             : 
    5549          20 :     block = ItemPointerGetBlockNumber(tid);
    5550          20 :     buffer = ReadBuffer(relation, block);
    5551          20 :     page = BufferGetPage(buffer);
    5552             : 
    5553          20 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    5554             : 
    5555             :     /*
    5556             :      * Page can't be all visible, we just inserted into it, and are still
    5557             :      * running.
    5558             :      */
    5559             :     Assert(!PageIsAllVisible(page));
    5560             : 
    5561          20 :     lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
    5562             :     Assert(ItemIdIsNormal(lp));
    5563             : 
    5564          20 :     tp.t_tableOid = RelationGetRelid(relation);
    5565          20 :     tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
    5566          20 :     tp.t_len = ItemIdGetLength(lp);
    5567          20 :     tp.t_self = *tid;
    5568             : 
    5569             :     /*
    5570             :      * Sanity check that the tuple really is a speculatively inserted tuple,
    5571             :      * inserted by us.
    5572             :      */
    5573          20 :     if (tp.t_data->t_choice.t_heap.t_xmin != xid)
    5574           0 :         elog(ERROR, "attempted to kill a tuple inserted by another transaction");
    5575          20 :     if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
    5576           0 :         elog(ERROR, "attempted to kill a non-speculative tuple");
    5577             :     Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
    5578             : 
    5579             :     /*
    5580             :      * No need to check for serializable conflicts here.  There is never a
    5581             :      * need for a combocid, either.  No need to extract replica identity, or
    5582             :      * do anything special with infomask bits.
    5583             :      */
    5584             : 
    5585          20 :     START_CRIT_SECTION();
    5586             : 
    5587             :     /*
    5588             :      * The tuple will become DEAD immediately.  Flag that this page is a
    5589             :      * candidate for pruning by setting xmin to TransactionXmin. While not
    5590             :      * immediately prunable, it is the oldest xid we can cheaply determine
    5591             :      * that's safe against wraparound / being older than the table's
    5592             :      * relfrozenxid.  To defend against the unlikely case of a new relation
    5593             :      * having a newer relfrozenxid than our TransactionXmin, use relfrozenxid
    5594             :      * if so (vacuum can't subsequently move relfrozenxid to beyond
    5595             :      * TransactionXmin, so there's no race here).
    5596             :      */
    5597             :     Assert(TransactionIdIsValid(TransactionXmin));
    5598          20 :     if (TransactionIdPrecedes(TransactionXmin, relation->rd_rel->relfrozenxid))
    5599           0 :         prune_xid = relation->rd_rel->relfrozenxid;
    5600             :     else
    5601          20 :         prune_xid = TransactionXmin;
    5602          20 :     PageSetPrunable(page, prune_xid);
    5603             : 
    5604             :     /* store transaction information of xact deleting the tuple */
    5605          20 :     tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    5606          20 :     tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    5607             : 
    5608             :     /*
    5609             :      * Set the tuple header xmin to InvalidTransactionId.  This makes the
    5610             :      * tuple immediately invisible to everyone.  (In particular, to any
    5611             :      * transactions waiting on the speculative token, woken up later.)
    5612             :      */
    5613          20 :     HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
    5614             : 
    5615             :     /* Clear the speculative insertion token too */
    5616          20 :     tp.t_data->t_ctid = tp.t_self;
    5617             : 
    5618          20 :     MarkBufferDirty(buffer);
    5619             : 
    5620             :     /*
    5621             :      * XLOG stuff
    5622             :      *
    5623             :      * The WAL records generated here match heap_delete().  The same recovery
    5624             :      * routines are used.
    5625             :      */
    5626          20 :     if (RelationNeedsWAL(relation))
    5627             :     {
    5628             :         xl_heap_delete xlrec;
    5629             :         XLogRecPtr  recptr;
    5630             : 
    5631          20 :         xlrec.flags = XLH_DELETE_IS_SUPER;
    5632          40 :         xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
    5633          20 :                                               tp.t_data->t_infomask2);
    5634          20 :         xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
    5635          20 :         xlrec.xmax = xid;
    5636             : 
    5637          20 :         XLogBeginInsert();
    5638          20 :         XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
    5639          20 :         XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    5640             : 
    5641             :         /* No replica identity & replication origin logged */
    5642             : 
    5643          20 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
    5644             : 
    5645          20 :         PageSetLSN(page, recptr);
    5646             :     }
    5647             : 
    5648          20 :     END_CRIT_SECTION();
    5649             : 
    5650          20 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
    5651             : 
    5652          20 :     if (HeapTupleHasExternal(&tp))
    5653             :     {
    5654             :         Assert(!IsToastRelation(relation));
    5655           2 :         heap_toast_delete(relation, &tp, true);
    5656             :     }
    5657             : 
    5658             :     /*
    5659             :      * Never need to mark tuple for invalidation, since catalogs don't support
    5660             :      * speculative insertion
    5661             :      */
    5662             : 
    5663             :     /* Now we can release the buffer */
    5664          20 :     ReleaseBuffer(buffer);
    5665             : 
    5666             :     /* count deletion, as we counted the insertion too */
    5667          20 :     pgstat_count_heap_delete(relation);
    5668          20 : }
    5669             : 
    5670             : /*
    5671             :  * heap_inplace_update - update a tuple "in place" (ie, overwrite it)
    5672             :  *
    5673             :  * Overwriting violates both MVCC and transactional safety, so the uses
    5674             :  * of this function in Postgres are extremely limited.  Nonetheless we
    5675             :  * find some places to use it.
    5676             :  *
    5677             :  * The tuple cannot change size, and therefore it's reasonable to assume
    5678             :  * that its null bitmap (if any) doesn't change either.  So we just
    5679             :  * overwrite the data portion of the tuple without touching the null
    5680             :  * bitmap or any of the header fields.
    5681             :  *
    5682             :  * tuple is an in-memory tuple structure containing the data to be written
    5683             :  * over the target tuple.  Also, tuple->t_self identifies the target tuple.
    5684             :  */
    5685             : void
    5686      231658 : heap_inplace_update(Relation relation, HeapTuple tuple)
    5687             : {
    5688             :     Buffer      buffer;
    5689             :     Page        page;
    5690             :     OffsetNumber offnum;
    5691      231658 :     ItemId      lp = NULL;
    5692             :     HeapTupleHeader htup;
    5693             :     uint32      oldlen;
    5694             :     uint32      newlen;
    5695             : 
    5696             :     /*
    5697             :      * For now, parallel operations are required to be strictly read-only.
    5698             :      * Unlike a regular update, this should never create a combo CID, so it
    5699             :      * might be possible to relax this restriction, but not without more
    5700             :      * thought and testing.  It's not clear that it would be useful, anyway.
    5701             :      */
    5702      231658 :     if (IsInParallelMode())
    5703           0 :         ereport(ERROR,
    5704             :                 (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
    5705             :                  errmsg("cannot update tuples during a parallel operation")));
    5706             : 
    5707      231658 :     buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
    5708      231658 :     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    5709      231658 :     page = (Page) BufferGetPage(buffer);
    5710             : 
    5711      231658 :     offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
    5712      231658 :     if (PageGetMaxOffsetNumber(page) >= offnum)
    5713      231658 :         lp = PageGetItemId(page, offnum);
    5714             : 
    5715      231658 :     if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
    5716           0 :         elog(ERROR, "invalid lp");
    5717             : 
    5718      231658 :     htup = (HeapTupleHeader) PageGetItem(page, lp);
    5719             : 
    5720      231658 :     oldlen = ItemIdGetLength(lp) - htup->t_hoff;
    5721      231658 :     newlen = tuple->t_len - tuple->t_data->t_hoff;
    5722      231658 :     if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
    5723           0 :         elog(ERROR, "wrong tuple length");
    5724             : 
    5725             :     /* NO EREPORT(ERROR) from here till changes are logged */
    5726      231658 :     START_CRIT_SECTION();
    5727             : 
    5728      463316 :     memcpy((char *) htup + htup->t_hoff,
    5729      231658 :            (char *) tuple->t_data + tuple->t_data->t_hoff,
    5730             :            newlen);
    5731             : 
    5732      231658 :     MarkBufferDirty(buffer);
    5733             : 
    5734             :     /* XLOG stuff */
    5735      231658 :     if (RelationNeedsWAL(relation))
    5736             :     {
    5737             :         xl_heap_inplace xlrec;
    5738             :         XLogRecPtr  recptr;
    5739             : 
    5740      231650 :         xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
    5741             : 
    5742      231650 :         XLogBeginInsert();
    5743      231650 :         XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
    5744             : 
    5745      231650 :         XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    5746      231650 :         XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
    5747             : 
    5748             :         /* inplace updates aren't decoded atm, don't log the origin */
    5749             : 
    5750      231650 :         recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
    5751             : 
    5752      231650 :         PageSetLSN(page, recptr);
    5753             :     }
    5754             : 
    5755      231658 :     END_CRIT_SECTION();
    5756             : 
    5757      231658 :     UnlockReleaseBuffer(buffer);
    5758             : 
    5759             :     /*
    5760             :      * Send out shared cache inval if necessary.  Note that because we only
    5761             :      * pass the new version of the tuple, this mustn't be used for any
    5762             :      * operations that could change catcache lookup keys.  But we aren't
    5763             :      * bothering with index updates either, so that's true a fortiori.
    5764             :      */
    5765      231658 :     if (!IsBootstrapProcessingMode())
    5766      131418 :         CacheInvalidateHeapTuple(relation, tuple, NULL);
    5767      231658 : }
    5768             : 
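A minimal caller sketch for heap_inplace_update, modeled loosely on how VACUUM
overwrites pg_class statistics in place; it is not part of heapam.c, and the
helper name, the relpages assignment, and the include list are illustrative
assumptions.  It respects the constraints documented above: the patched field
keeps the same size, and only the data portion of the tuple is rewritten.

#include "postgres.h"

#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/table.h"
#include "catalog/pg_class.h"
#include "utils/syscache.h"

/* Hypothetical helper: set pg_class.relpages for a relation, in place. */
static void
example_set_relpages(Oid relid, int32 num_pages)
{
    Relation    rd = table_open(RelationRelationId, RowExclusiveLock);
    HeapTuple   ctup;
    Form_pg_class pgcform;

    /*
     * Work on a palloc'd copy of the catalog row; its t_self still points
     * at the on-disk tuple, which is what heap_inplace_update overwrites.
     */
    ctup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
    if (!HeapTupleIsValid(ctup))
        elog(ERROR, "pg_class entry for relation %u vanished", relid);

    pgcform = (Form_pg_class) GETSTRUCT(ctup);
    pgcform->relpages = num_pages;  /* same-size field; tuple size unchanged */

    heap_inplace_update(rd, ctup);  /* overwrite data portion, WAL-logged */

    table_close(rd, RowExclusiveLock);
}
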
    5769             : #define     FRM_NOOP                0x0001
    5770             : #define     FRM_INVALIDATE_XMAX     0x0002
    5771             : #define     FRM_RETURN_IS_XID       0x0004
    5772             : #define     FRM_RETURN_IS_MULTI     0x0008
    5773             : #define     FRM_MARK_COMMITTED      0x0010
    5774             : 
    5775             : /*
    5776             :  * FreezeMultiXactId
    5777             :  *      Determine what to do during freezing when a tuple is marked by a
    5778             :  *      MultiXactId.
    5779             :  *
    5780             :  * NB -- this might have the side-effect of creating a new MultiXactId!
    5781             :  *
    5782             :  * "flags" is an output value; it's used to tell caller what to do on return.
    5783             :  * Possible flags are:
    5784             :  * FRM_NOOP
    5785             :  *      don't do anything -- keep existing Xmax
    5786             :  * FRM_INVALIDATE_XMAX
    5787             :  *      mark Xmax as InvalidTransactionId and set XMAX_INVALID flag.
    5788             :  * FRM_RETURN_IS_XID
    5789             :  *      The Xid return value is a single update Xid to set as xmax.
    5790             :  * FRM_MARK_COMMITTED
    5791             :  *      Xmax can be marked as HEAP_XMAX_COMMITTED
    5792             :  * FRM_RETURN_IS_MULTI
    5793             :  *      The return value is a new MultiXactId to set as new Xmax.
    5794             :  *      (caller must obtain proper infomask bits using GetMultiXactIdHintBits)
    5795             :  */
    5796             : static TransactionId
    5797           6 : FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
    5798             :                   TransactionId relfrozenxid, TransactionId relminmxid,
    5799             :                   TransactionId cutoff_xid, MultiXactId cutoff_multi,
    5800             :                   uint16 *flags)
    5801             : {
    5802           6 :     TransactionId xid = InvalidTransactionId;
    5803             :     int         i;
    5804             :     MultiXactMember *members;
    5805             :     int         nmembers;
    5806             :     bool        need_replace;
    5807             :     int         nnewmembers;
    5808             :     MultiXactMember *newmembers;
    5809             :     bool        has_lockers;
    5810             :     TransactionId update_xid;
    5811             :     bool        update_committed;
    5812             : 
    5813           6 :     *flags = 0;
    5814             : 
    5815             :     /* We should only be called in Multis */
    5816             :     Assert(t_infomask & HEAP_XMAX_IS_MULTI);
    5817             : 
    5818           6 :     if (!MultiXactIdIsValid(multi) ||
    5819           6 :         HEAP_LOCKED_UPGRADED(t_infomask))
    5820             :     {
    5821             :         /* Ensure infomask bits are appropriately set/reset */
    5822           0 :         *flags |= FRM_INVALIDATE_XMAX;
    5823           0 :         return InvalidTransactionId;
    5824             :     }
    5825           6 :     else if (MultiXactIdPrecedes(multi, relminmxid))
    5826           0 :         ereport(ERROR,
    5827             :                 (errcode(ERRCODE_DATA_CORRUPTED),
    5828             :                  errmsg_internal("found multixact %u from before relminmxid %u",
    5829             :                                  multi, relminmxid)));
    5830           6 :     else if (MultiXactIdPrecedes(multi, cutoff_multi))
    5831             :     {
    5832             :         /*
    5833             :          * This old multi cannot possibly have members still running, but
    5834             :          * verify just in case.  If it was a locker only, it can be removed
    5835             :          * without any further consideration; but if it contained an update,
    5836             :          * we might need to preserve it.
    5837             :          */
    5838           2 :         if (MultiXactIdIsRunning(multi,
    5839           2 :                                  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
    5840           0 :             ereport(ERROR,
    5841             :                     (errcode(ERRCODE_DATA_CORRUPTED),
    5842             :                      errmsg_internal("multixact %u from before cutoff %u found to be still running",
    5843             :                                      multi, cutoff_multi)));
    5844             : 
    5845           2 :         if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
    5846             :         {
    5847           2 :             *flags |= FRM_INVALIDATE_XMAX;
    5848           2 :             xid = InvalidTransactionId; /* not strictly necessary */
    5849             :         }
    5850             :         else
    5851             :         {
    5852             :             /* replace multi by update xid */
    5853           0 :             xid = MultiXactIdGetUpdateXid(multi, t_infomask);
    5854             : 
    5855             :             /* wasn't only a lock, xid needs to be valid */
    5856             :             Assert(TransactionIdIsValid(xid));
    5857             : 
    5858           0 :             if (TransactionIdPrecedes(xid, relfrozenxid))
    5859           0 :                 ereport(ERROR,
    5860             :                         (errcode(ERRCODE_DATA_CORRUPTED),
    5861             :                          errmsg_internal("found update xid %u from before relfrozenxid %u",
    5862             :                                          xid, relfrozenxid)));
    5863             : 
    5864             :             /*
    5865             :              * If the xid is older than the cutoff, it has to have aborted,
    5866             :              * otherwise the tuple would have gotten pruned away.
    5867             :              */
    5868           0 :             if (TransactionIdPrecedes(xid, cutoff_xid))
    5869             :             {
    5870           0 :                 if (TransactionIdDidCommit(xid))
    5871           0 :                     ereport(ERROR,
    5872             :                             (errcode(ERRCODE_DATA_CORRUPTED),
    5873             :                              errmsg_internal("cannot freeze committed update xid %u", xid)));
    5874           0 :                 *flags |= FRM_INVALIDATE_XMAX;
    5875           0 :                 xid = InvalidTransactionId; /* not strictly necessary */
    5876             :             }
    5877             :             else
    5878             :             {
    5879           0 :                 *flags |= FRM_RETURN_IS_XID;
    5880             :             }
    5881             :         }
    5882             : 
    5883           2 :         return xid;
    5884             :     }
    5885             : 
    5886             :     /*
    5887             :      * This multixact might have or might not have members still running, but
    5888             :      * we know it's valid and is newer than the cutoff point for multis.
    5889             :      * However, some member(s) of it may be below the cutoff for Xids, so we
    5890             :      * need to walk the whole members array to figure out what to do, if
    5891             :      * anything.
    5892             :      */
    5893             : 
    5894             :     nmembers =
    5895           4 :         GetMultiXactIdMembers(multi, &members, false,
    5896           4 :                               HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
    5897           4 :     if (nmembers <= 0)
    5898             :     {
    5899             :         /* Nothing worth keeping */
    5900           2 :         *flags |= FRM_INVALIDATE_XMAX;
    5901           2 :         return InvalidTransactionId;
    5902             :     }
    5903             : 
    5904             :     /* is there anything older than the cutoff? */
    5905           2 :     need_replace = false;
    5906           2 :     for (i = 0; i < nmembers; i++)
    5907             :     {
    5908           2 :         if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
    5909             :         {
    5910           2 :             need_replace = true;
    5911           2 :             break;
    5912             :         }
    5913             :     }
    5914             : 
    5915             :     /*
    5916             :      * In the simplest case, there is no member older than the cutoff; we can
    5917             :      * keep the existing MultiXactId as is.
    5918             :      */
    5919           2 :     if (!need_replace)
    5920             :     {
    5921           0 :         *flags |= FRM_NOOP;
    5922           0 :         pfree(members);
    5923           0 :         return InvalidTransactionId;
    5924             :     }
    5925             : 
    5926             :     /*
    5927             :      * If the multi needs to be updated, figure out which members we need
    5928             :      * to keep.
    5929             :      */
    5930           2 :     nnewmembers = 0;
    5931           2 :     newmembers = palloc(sizeof(MultiXactMember) * nmembers);
    5932           2 :     has_lockers = false;
    5933           2 :     update_xid = InvalidTransactionId;
    5934           2 :     update_committed = false;
    5935             : 
    5936           6 :     for (i = 0; i < nmembers; i++)
    5937             :     {
    5938             :         /*
    5939             :          * Determine whether to keep this member or ignore it.
    5940             :          */
    5941           4 :         if (ISUPDATE_from_mxstatus(members[i].status))
    5942             :         {
    5943           0 :             TransactionId xid = members[i].xid;
    5944             : 
    5945             :             Assert(TransactionIdIsValid(xid));
    5946           0 :             if (TransactionIdPrecedes(xid, relfrozenxid))
    5947           0 :                 ereport(ERROR,
    5948             :                         (errcode(ERRCODE_DATA_CORRUPTED),
    5949             :                          errmsg_internal("found update xid %u from before relfrozenxid %u",
    5950             :                                          xid, relfrozenxid)));
    5951             : 
    5952             :             /*
    5953             :              * It's an update; should we keep it?  If the transaction is known
    5954             :              * aborted or crashed then it's okay to ignore it, otherwise not.
    5955             :              * Note that an updater older than cutoff_xid cannot possibly be
    5956             :              * committed, because HeapTupleSatisfiesVacuum would have returned
    5957             :              * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
    5958             :              *
    5959             :              * As with all tuple visibility routines, it's critical to test
    5960             :              * TransactionIdIsInProgress before TransactionIdDidCommit,
    5961             :              * because of race conditions explained in detail in
    5962             :              * heapam_visibility.c.
    5963             :              */
    5964           0 :             if (TransactionIdIsCurrentTransactionId(xid) ||
    5965           0 :                 TransactionIdIsInProgress(xid))
    5966             :             {
    5967             :                 Assert(!TransactionIdIsValid(update_xid));
    5968           0 :                 update_xid = xid;
    5969             :             }
    5970           0 :             else if (TransactionIdDidCommit(xid))
    5971             :             {
    5972             :                 /*
    5973             :                  * The transaction committed, so we can tell caller to set
    5974             :                  * HEAP_XMAX_COMMITTED.  (We can only do this because we know
    5975             :                  * the transaction is not running.)
    5976             :                  */
    5977             :                 Assert(!TransactionIdIsValid(update_xid));
    5978           0 :                 update_committed = true;
    5979           0 :                 update_xid = xid;
    5980             :             }
    5981             :             else
    5982             :             {
    5983             :                 /*
    5984             :                  * Not in progress, not committed -- must be aborted or
    5985             :                  * crashed; we can ignore it.
    5986             :                  */
    5987             :             }
    5988             : 
    5989             :             /*
    5990             :              * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
    5991             :              * update Xid cannot possibly be older than the xid cutoff. The
    5992             :              * presence of such a tuple would cause corruption, so be paranoid
    5993             :              * and check.
    5994             :              */
    5995           0 :             if (TransactionIdIsValid(update_xid) &&
    5996           0 :                 TransactionIdPrecedes(update_xid, cutoff_xid))
    5997           0 :                 ereport(ERROR,
    5998             :                         (errcode(ERRCODE_DATA_CORRUPTED),
    5999             :                          errmsg_internal("found update xid %u from before xid cutoff %u",
    6000             :                                          update_xid, cutoff_xid)));
    6001             : 
    6002             :             /*
    6003             :              * If we determined that it's an Xid corresponding to an update
    6004             :              * that must be retained, additionally add it to the list of
    6005             :              * members of the new Multi, in case we end up using that.  (We
    6006             :              * might still decide to use only an update Xid and not a multi,
    6007             :              * but it's easier to maintain the list as we walk the old members
    6008             :              * list.)
    6009             :              */
    6010           0 :             if (TransactionIdIsValid(update_xid))
    6011           0 :                 newmembers[nnewmembers++] = members[i];
    6012             :         }
    6013             :         else
    6014             :         {
    6015             :             /* We only keep lockers if they are still running */
    6016           8 :             if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
    6017           4 :                 TransactionIdIsInProgress(members[i].xid))
    6018             :             {
    6019             :                 /* running locker cannot possibly be older than the cutoff */
    6020             :                 Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
    6021           2 :                 newmembers[nnewmembers++] = members[i];
    6022           2 :                 has_lockers = true;
    6023             :             }
    6024             :         }
    6025             :     }
    6026             : 
    6027           2 :     pfree(members);
    6028             : 
    6029           2 :     if (nnewmembers == 0)
    6030             :     {
    6031             :         /* nothing worth keeping!? Tell caller to remove the whole thing */
    6032           0 :         *flags |= FRM_INVALIDATE_XMAX;
    6033           0 :         xid = InvalidTransactionId;
    6034             :     }
    6035           2 :     else if (TransactionIdIsValid(update_xid) && !has_lockers)
    6036             :     {
    6037             :         /*
    6038             :          * If there's a single member and it's an update, pass it back alone
    6039             :          * without creating a new Multi.  (XXX we could do this when there's a
    6040             :          * single remaining locker, too, but that would complicate the API too
    6041             :          * much; moreover, the case with the single updater is more
    6042             :          * interesting, because those are longer-lived.)
    6043             :          */
    6044             :         Assert(nnewmembers == 1);
    6045           0 :         *flags |= FRM_RETURN_IS_XID;
    6046           0 :         if (update_committed)
    6047           0 :             *flags |= FRM_MARK_COMMITTED;
    6048           0 :         xid = update_xid;
    6049             :     }
    6050             :     else
    6051             :     {
    6052             :         /*
    6053             :          * Create a new multixact with the surviving members of the previous
    6054             :          * one, to set as new Xmax in the tuple.
    6055             :          */
    6056           2 :         xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
    6057           2 :         *flags |= FRM_RETURN_IS_MULTI;
    6058             :     }
    6059             : 
    6060           2 :     pfree(newmembers);
    6061             : 
    6062           2 :     return xid;
    6063             : }
    6064             : 
    6065             : /*
    6066             :  * heap_prepare_freeze_tuple
    6067             :  *
    6068             :  * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
    6069             :  * are older than the specified cutoff XID and cutoff MultiXactId.  If so,
    6070             :  * setup enough state (in the *frz output argument) to later execute and
    6071             :  * set up enough state (in the *frz output argument) to later execute and
    6072             :  * is to be changed.  In addition, set *totally_frozen_p to true if the tuple
    6073             :  * will be totally frozen after these operations are performed and false if
    6074             :  * more freezing will eventually be required.
    6075             :  *
    6076             :  * Caller is responsible for setting the offset field, if appropriate.
    6077             :  *
    6078             :  * It is assumed that the caller has checked the tuple with
    6079             :  * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
    6080             :  * (else we should be removing the tuple, not freezing it).
    6081             :  *
    6082             :  * NB: cutoff_xid *must* be <= the current global xmin, to ensure that any
    6083             :  * XID older than it could neither be running nor seen as running by any
    6084             :  * open transaction.  This ensures that the replacement will not change
    6085             :  * anyone's idea of the tuple state.
    6086             :  * Similarly, cutoff_multi must be less than or equal to the smallest
    6087             :  * MultiXactId used by any transaction currently open.
    6088             :  *
    6089             :  * If the tuple is in a shared buffer, caller must hold an exclusive lock on
    6090             :  * that buffer.
    6091             :  *
    6092             :  * NB: It is not enough to set hint bits to indicate something is
    6093             :  * committed/invalid -- they might not be set on a standby, or after crash
    6094             :  * recovery.  We really need to remove old xids.
    6095             :  */
    6096             : bool
    6097    11671550 : heap_prepare_freeze_tuple(HeapTupleHeader tuple,
    6098             :                           TransactionId relfrozenxid, TransactionId relminmxid,
    6099             :                           TransactionId cutoff_xid, TransactionId cutoff_multi,
    6100             :                           xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
    6101             : {
    6102    11671550 :     bool        changed = false;
    6103    11671550 :     bool        xmax_already_frozen = false;
    6104             :     bool        xmin_frozen;
    6105             :     bool        freeze_xmax;
    6106             :     TransactionId xid;
    6107             : 
    6108    11671550 :     frz->frzflags = 0;
    6109    11671550 :     frz->t_infomask2 = tuple->t_infomask2;
    6110    11671550 :     frz->t_infomask = tuple->t_infomask;
    6111    11671550 :     frz->xmax = HeapTupleHeaderGetRawXmax(tuple);
    6112             : 
    6113             :     /*
    6114             :      * Process xmin.  xmin_frozen has two slightly different meanings: in the
    6115             :      * !XidIsNormal case, it means "the xmin doesn't need any freezing" (it's
    6116             :      * already a permanent value), while in the block below it is set true to
    6117             :      * mean "xmin won't need freezing after what we do to it here" (false
    6118             :      * otherwise).  In both cases we're allowed to set totally_frozen, as far
    6119             :      * as xmin is concerned.
    6120             :      */
    6121    11671550 :     xid = HeapTupleHeaderGetXmin(tuple);
    6122    11671550 :     if (!TransactionIdIsNormal(xid))
    6123     4391212 :         xmin_frozen = true;
    6124             :     else
    6125             :     {
    6126     7280338 :         if (TransactionIdPrecedes(xid, relfrozenxid))
    6127           0 :             ereport(ERROR,
    6128             :                     (errcode(ERRCODE_DATA_CORRUPTED),
    6129             :                      errmsg_internal("found xmin %u from before relfrozenxid %u",
    6130             :                                      xid, relfrozenxid)));
    6131             : 
    6132     7280338 :         xmin_frozen = TransactionIdPrecedes(xid, cutoff_xid);
    6133     7280338 :         if (xmin_frozen)
    6134             :         {
    6135     5064554 :             if (!TransactionIdDidCommit(xid))
    6136           0 :                 ereport(ERROR,
    6137             :                         (errcode(ERRCODE_DATA_CORRUPTED),
    6138             :                          errmsg_internal("uncommitted xmin %u from before xid cutoff %u needs to be frozen",
    6139             :                                          xid, cutoff_xid)));
    6140             : 
    6141     5064554 :             frz->t_infomask |= HEAP_XMIN_FROZEN;
    6142     5064554 :             changed = true;
    6143             :         }
    6144             :     }
    6145             : 
    6146             :     /*
    6147             :      * Process xmax.  To thoroughly examine the current Xmax value we need to
    6148             :      * resolve a MultiXactId to its member Xids, in case some of them are
    6149             :      * below the given cutoff for Xids.  In that case, those values might need
    6150             :      * freezing, too.  Also, if a multi needs freezing, we cannot simply take
    6151             :      * it out --- if there's a live updater Xid, it needs to be kept.
    6152             :      *
    6153             :      * Make sure to keep heap_tuple_needs_freeze in sync with this.
    6154             :      */
    6155    11671550 :     xid = HeapTupleHeaderGetRawXmax(tuple);
    6156             : 
    6157    11671550 :     if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
    6158             :     {
    6159             :         TransactionId newxmax;
    6160             :         uint16      flags;
    6161             : 
    6162           6 :         newxmax = FreezeMultiXactId(xid, tuple->t_infomask,
    6163             :                                     relfrozenxid, relminmxid,
    6164             :                                     cutoff_xid, cutoff_multi, &flags);
    6165             : 
    6166           6 :         freeze_xmax = (flags & FRM_INVALIDATE_XMAX);
    6167             : 
    6168           6 :         if (flags & FRM_RETURN_IS_XID)
    6169             :         {
    6170             :             /*
    6171             :              * NB -- some of these transformations are only valid because we
    6172             :              * know the return Xid is a tuple updater (i.e. not merely a
    6173             :              * locker.) Also note that the only reason we don't explicitly
    6174             :              * worry about HEAP_KEYS_UPDATED is because it lives in
    6175             :              * t_infomask2 rather than t_infomask.
    6176             :              */
    6177           0 :             frz->t_infomask &= ~HEAP_XMAX_BITS;
    6178           0 :             frz->xmax = newxmax;
    6179           0 :             if (flags & FRM_MARK_COMMITTED)
    6180           0 :                 frz->t_infomask |= HEAP_XMAX_COMMITTED;
    6181           0 :             changed = true;
    6182             :         }
    6183           6 :         else if (flags & FRM_RETURN_IS_MULTI)
    6184             :         {
    6185             :             uint16      newbits;
    6186             :             uint16      newbits2;
    6187             : 
    6188             :             /*
    6189             :              * We can't use GetMultiXactIdHintBits directly on the new multi
    6190             :              * here; that routine initializes the masks to all zeroes, which
    6191             :              * would lose other bits we need.  Doing it this way ensures all
    6192             :              * unrelated bits remain untouched.
    6193             :              */
    6194           2 :             frz->t_infomask &= ~HEAP_XMAX_BITS;
    6195           2 :             frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    6196           2 :             GetMultiXactIdHintBits(newxmax, &newbits, &newbits2);
    6197           2 :             frz->t_infomask |= newbits;
    6198           2 :             frz->t_infomask2 |= newbits2;
    6199             : 
    6200           2 :             frz->xmax = newxmax;
    6201             : 
    6202           2 :             changed = true;
    6203             :         }
    6204             :     }
    6205    11671544 :     else if (TransactionIdIsNormal(xid))
    6206             :     {
    6207      456386 :         if (TransactionIdPrecedes(xid, relfrozenxid))
    6208           0 :             ereport(ERROR,
    6209             :                     (errcode(ERRCODE_DATA_CORRUPTED),
    6210             :                      errmsg_internal("found xmax %u from before relfrozenxid %u",
    6211             :                                      xid, relfrozenxid)));
    6212             : 
    6213      456386 :         if (TransactionIdPrecedes(xid, cutoff_xid))
    6214             :         {
    6215             :             /*
    6216             :              * If we freeze xmax, make absolutely sure that it's not an XID
    6217             :              * that is important.  (Note, a lock-only xmax can be removed
    6218             :              * independent of committedness, since a committed lock holder has
    6219             :              * released the lock).
    6220             :              */
    6221          20 :             if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
    6222           2 :                 TransactionIdDidCommit(xid))
    6223           0 :                 ereport(ERROR,
    6224             :                         (errcode(ERRCODE_DATA_CORRUPTED),
    6225             :                          errmsg_internal("cannot freeze committed xmax %u",
    6226             :                                          xid)));
    6227          18 :             freeze_xmax = true;
    6228             :         }
    6229             :         else
    6230      456368 :             freeze_xmax = false;
    6231             :     }
    6232    11215158 :     else if ((tuple->t_infomask & HEAP_XMAX_INVALID) ||
    6233           0 :              !TransactionIdIsValid(HeapTupleHeaderGetRawXmax(tuple)))
    6234             :     {
    6235    11215158 :         freeze_xmax = false;
    6236    11215158 :         xmax_already_frozen = true;
    6237             :     }
    6238             :     else
    6239           0 :         ereport(ERROR,
    6240             :                 (errcode(ERRCODE_DATA_CORRUPTED),
    6241             :                  errmsg_internal("found xmax %u (infomask 0x%04x) not frozen, not multi, not normal",
    6242             :                                  xid, tuple->t_infomask)));
    6243             : 
    6244    11671550 :     if (freeze_xmax)
    6245             :     {
    6246             :         Assert(!xmax_already_frozen);
    6247             : 
    6248          22 :         frz->xmax = InvalidTransactionId;
    6249             : 
    6250             :         /*
    6251             :          * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
    6252             :          * LOCKED.  Normalize to INVALID just to be sure no one gets confused.
    6253             :          * Also get rid of the HEAP_KEYS_UPDATED bit.
    6254             :          */
    6255          22 :         frz->t_infomask &= ~HEAP_XMAX_BITS;
    6256          22 :         frz->t_infomask |= HEAP_XMAX_INVALID;
    6257          22 :         frz->t_infomask2 &= ~HEAP_HOT_UPDATED;
    6258          22 :         frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    6259          22 :         changed = true;
    6260             :     }
    6261             : 
    6262             :     /*
    6263             :      * Old-style VACUUM FULL is gone, but we have to keep this code as long as
    6264             :      * we support having MOVED_OFF/MOVED_IN tuples in the database.
    6265             :      */
    6266    11671550 :     if (tuple->t_infomask & HEAP_MOVED)
    6267             :     {
    6268           0 :         xid = HeapTupleHeaderGetXvac(tuple);
    6269             : 
    6270             :         /*
    6271             :          * For Xvac, we ignore the cutoff_xid and just always perform the
    6272             :          * freeze operation.  The oldest release in which such a value can
    6273             :          * actually be set is PostgreSQL 8.4, because old-style VACUUM FULL
    6274             :          * was removed in PostgreSQL 9.0.  Note that if we were to respect
    6275             :          * cutoff_xid here, we'd need to make surely to clear totally_frozen
    6276             :          * cutoff_xid here, we'd need to make sure to clear totally_frozen
    6277             :          */
    6278           0 :         if (TransactionIdIsNormal(xid))
    6279             :         {
    6280             :             /*
    6281             :              * If a MOVED_OFF tuple is not dead, the xvac transaction must
    6282             :              * have failed; whereas a non-dead MOVED_IN tuple must mean the
    6283             :              * xvac transaction succeeded.
    6284             :              */
    6285           0 :             if (tuple->t_infomask & HEAP_MOVED_OFF)
    6286           0 :                 frz->frzflags |= XLH_INVALID_XVAC;
    6287             :             else
    6288           0 :                 frz->frzflags |= XLH_FREEZE_XVAC;
    6289             : 
    6290             :             /*
    6291             :              * Might as well fix the hint bits too; usually XMIN_COMMITTED
    6292             :              * will already be set here, but there's a small chance not.
    6293             :              */
    6294             :             Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
    6295           0 :             frz->t_infomask |= HEAP_XMIN_COMMITTED;
    6296           0 :             changed = true;
    6297             :         }
    6298             :     }
    6299             : 
    6300    21127296 :     *totally_frozen_p = (xmin_frozen &&
    6301     9455746 :                          (freeze_xmax || xmax_already_frozen));
    6302    11671550 :     return changed;
    6303             : }
    6304             : 
    6305             : /*
    6306             :  * heap_execute_freeze_tuple
    6307             :  *      Execute the prepared freezing of a tuple.
    6308             :  *
    6309             :  * Caller is responsible for ensuring that no other backend can access the
    6310             :  * storage underlying this tuple, either by holding an exclusive lock on the
    6311             :  * buffer containing it (which is what lazy VACUUM does), or by having it be
    6312             :  * in private storage (which is what CLUSTER and friends do).
    6313             :  *
    6314             :  * Note: it might seem we could make the changes without exclusive lock, since
    6315             :  * TransactionId read/write is assumed atomic anyway.  However there is a race
    6316             :  * condition: someone who just fetched an old XID that we overwrite here could
    6317             :  * conceivably not finish checking the XID against pg_xact before we finish
    6318             :  * the VACUUM and perhaps truncate off the part of pg_xact he needs.  Getting
    6319             :  * exclusive lock ensures no other backend is in process of checking the
    6320             :  * tuple status.  Also, getting exclusive lock makes it safe to adjust the
    6321             :  * infomask bits.
    6322             :  *
    6323             :  * NB: All code in here must be safe to execute during crash recovery!
    6324             :  */
    6325             : void
    6326     5064558 : heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
    6327             : {
    6328     5064558 :     HeapTupleHeaderSetXmax(tuple, frz->xmax);
    6329             : 
    6330     5064558 :     if (frz->frzflags & XLH_FREEZE_XVAC)
    6331           0 :         HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
    6332             : 
    6333     5064558 :     if (frz->frzflags & XLH_INVALID_XVAC)
    6334           0 :         HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
    6335             : 
    6336     5064558 :     tuple->t_infomask = frz->t_infomask;
    6337     5064558 :     tuple->t_infomask2 = frz->t_infomask2;
    6338     5064558 : }
    6339             : 
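To make the division of labor concrete, here is a sketch (not from this file)
of how the prepare/execute pair above is used on the WAL-logged path, modeled
loosely on lazy VACUUM freezing one heap page.  The surrounding variables
(relation, buffer, page, relfrozenxid, relminmxid, FreezeLimit,
MultiXactCutoff) are assumed to come from the caller's scan, and
log_heap_freeze() is the existing heapam helper that WAL-logs a batch of
prepared freezes for a page.

    xl_heap_freeze_tuple frozen[MaxHeapTuplesPerPage];
    int         nfrozen = 0;
    int         i;
    OffsetNumber offnum;
    bool        totally_frozen;     /* output ignored in this sketch */

    /* Pass 1: decide what each tuple needs; nothing on the page changes yet */
    for (offnum = FirstOffsetNumber;
         offnum <= PageGetMaxOffsetNumber(page);
         offnum = OffsetNumberNext(offnum))
    {
        ItemId      itemid = PageGetItemId(page, offnum);

        if (!ItemIdIsNormal(itemid))
            continue;

        if (heap_prepare_freeze_tuple((HeapTupleHeader) PageGetItem(page, itemid),
                                      relfrozenxid, relminmxid,
                                      FreezeLimit, MultiXactCutoff,
                                      &frozen[nfrozen], &totally_frozen))
            frozen[nfrozen++].offset = offnum;  /* caller fills in the offset */
    }

    /* Pass 2: apply and WAL-log all of the page's changes as one unit */
    if (nfrozen > 0)
    {
        START_CRIT_SECTION();

        MarkBufferDirty(buffer);
        for (i = 0; i < nfrozen; i++)
        {
            ItemId      itemid = PageGetItemId(page, frozen[i].offset);

            heap_execute_freeze_tuple((HeapTupleHeader) PageGetItem(page, itemid),
                                      &frozen[i]);
        }

        if (RelationNeedsWAL(relation))
            PageSetLSN(page, log_heap_freeze(relation, buffer, FreezeLimit,
                                             frozen, nfrozen));

        END_CRIT_SECTION();
    }
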
    6340             : /*
    6341             :  * heap_freeze_tuple
    6342             :  *      Freeze tuple in place, without WAL logging.
    6343             :  *
    6344             :  * Useful for callers like CLUSTER that perform their own WAL logging.
    6345             :  */
    6346             : bool
    6347      483010 : heap_freeze_tuple(HeapTupleHeader tuple,
    6348             :                   TransactionId relfrozenxid, TransactionId relminmxid,
    6349             :                   TransactionId cutoff_xid, TransactionId cutoff_multi)
    6350             : {
    6351             :     xl_heap_freeze_tuple frz;
    6352             :     bool        do_freeze;
    6353             :     bool        tuple_totally_frozen;
    6354             : 
    6355      483010 :     do_freeze = heap_prepare_freeze_tuple(tuple,
    6356             :                                           relfrozenxid, relminmxid,
    6357             :                                           cutoff_xid, cutoff_multi,
    6358             :                                           &frz, &tuple_totally_frozen);
    6359             : 
    6360             :     /*
    6361             :      * Note that because this is not a WAL-logged operation, we don't need to
    6362             :      * fill in the offset in the freeze record.
    6363             :      */
    6364             : 
    6365      483010 :     if (do_freeze)
    6366      328030 :         heap_execute_freeze_tuple(tuple, &frz);
    6367      483010 :     return do_freeze;
    6368             : }
    6369             : 
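As the comment above says, heap_freeze_tuple is meant for callers that do
their own WAL logging.  A sketch of such a use (not from this file): freeze a
backend-private copy of a tuple before writing it into a new heap whose pages
are logged wholesale later.  The helper name and the cutoff parameters are
illustrative assumptions.

/* Hypothetical CLUSTER-style helper: copy a tuple and freeze the copy. */
static HeapTuple
example_copy_and_freeze(HeapTuple srctup,
                        TransactionId relfrozenxid, MultiXactId relminmxid,
                        TransactionId freeze_xid, MultiXactId freeze_multi)
{
    HeapTuple   copy = heap_copytuple(srctup);

    /* No freeze WAL record here; the rewritten page gets logged later. */
    (void) heap_freeze_tuple(copy->t_data,
                             relfrozenxid, relminmxid,
                             freeze_xid, freeze_multi);
    return copy;
}
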
    6370             : /*
    6371             :  * For a given MultiXactId, return the hint bits that should be set in the
    6372             :  * tuple's infomask.
    6373             :  *
    6374             :  * Normally this should be called for a multixact that was just created, and
    6375             :  * so is on our local cache, so the GetMembers call is fast.
    6376             :  * so is in our local cache, so the GetMembers call is fast.
    6377             : static void
    6378        2110 : GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
    6379             :                        uint16 *new_infomask2)
    6380             : {
    6381             :     int         nmembers;
    6382             :     MultiXactMember *members;
    6383             :     int         i;
    6384        2110 :     uint16      bits = HEAP_XMAX_IS_MULTI;
    6385        2110 :     uint16      bits2 = 0;
    6386        2110 :     bool        has_update = false;
    6387        2110 :     LockTupleMode strongest = LockTupleKeyShare;
    6388             : 
    6389             :     /*
    6390             :      * We only use this in multis we just created, so they cannot be values
    6391             :      * pre-pg_upgrade.
    6392             :      */
    6393        2110 :     nmembers = GetMultiXactIdMembers(multi, &members, false, false);
    6394             : 
    6395        6432 :     for (i = 0; i < nmembers; i++)
    6396             :     {
    6397             :         LockTupleMode mode;
    6398             : 
    6399             :         /*
    6400             :          * Remember the strongest lock mode held by any member of the
    6401             :          * multixact.
    6402             :          */
    6403        4322 :         mode = TUPLOCK_from_mxstatus(members[i].status);
    6404        4322 :         if (mode > strongest)
    6405        1146 :             strongest = mode;
    6406             : 
    6407             :         /* See what other bits we need */
    6408        4322 :         switch (members[i].status)
    6409             :         {
    6410        4074 :             case MultiXactStatusForKeyShare:
    6411             :             case MultiXactStatusForShare:
    6412             :             case MultiXactStatusForNoKeyUpdate:
    6413        4074 :                 break;
    6414             : 
    6415          14 :             case MultiXactStatusForUpdate:
    6416          14 :                 bits2 |= HEAP_KEYS_UPDATED;
    6417          14 :                 break;
    6418             : 
    6419         216 :             case MultiXactStatusNoKeyUpdate:
    6420         216 :                 has_update = true;
    6421         216 :                 break;
    6422             : 
    6423          18 :             case MultiXactStatusUpdate:
    6424          18 :                 bits2 |= HEAP_KEYS_UPDATED;
    6425          18 :                 has_update = true;
    6426          18 :                 break;
    6427             :         }
    6428        4322 :     }
    6429             : 
    6430        2110 :     if (strongest == LockTupleExclusive ||
    6431             :         strongest == LockTupleNoKeyExclusive)
    6432         328 :         bits |= HEAP_XMAX_EXCL_LOCK;
    6433        1782 :     else if (strongest == LockTupleShare)
    6434         812 :         bits |= HEAP_XMAX_SHR_LOCK;
    6435         970 :     else if (strongest == LockTupleKeyShare)
    6436         970 :         bits |= HEAP_XMAX_KEYSHR_LOCK;
    6437             : 
    6438        2110 :     if (!has_update)
    6439        1876 :         bits |= HEAP_XMAX_LOCK_ONLY;
    6440             : 
    6441        2110 :     if (nmembers > 0)
    6442        2110 :         pfree(members);
    6443             : 
    6444        2110 :     *new_infomask = bits;
    6445        2110 :     *new_infomask2 = bits2;
    6446        2110 : }
    6447             : 
    6448             : /*
    6449             :  * MultiXactIdGetUpdateXid
    6450             :  *
    6451             :  * Given a multixact Xmax and corresponding infomask, which does not have the
    6452             :  * HEAP_XMAX_LOCK_ONLY bit set, obtain and return the Xid of the updating
    6453             :  * transaction.
    6454             :  *
    6455             :  * Caller is expected to check the status of the updating transaction, if
    6456             :  * necessary.
    6457             :  */
    6458             : static TransactionId
    6459         688 : MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
    6460             : {
    6461         688 :     TransactionId update_xact = InvalidTransactionId;
    6462             :     MultiXactMember *members;
    6463             :     int         nmembers;
    6464             : 
    6465             :     Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
    6466             :     Assert(t_infomask & HEAP_XMAX_IS_MULTI);
    6467             : 
    6468             :     /*
    6469             :      * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
    6470             :      * pre-pg_upgrade.
    6471             :      */
    6472         688 :     nmembers = GetMultiXactIdMembers(xmax, &members, false, false);
    6473             : 
    6474         688 :     if (nmembers > 0)
    6475             :     {
    6476             :         int         i;
    6477             : 
    6478        1686 :         for (i = 0; i < nmembers; i++)
    6479             :         {
    6480             :             /* Ignore lockers */
    6481        1686 :             if (!ISUPDATE_from_mxstatus(members[i].status))
    6482         998 :                 continue;
    6483             : 
    6484             :             /* there can be at most one updater */
    6485             :             Assert(update_xact == InvalidTransactionId);
    6486         688 :             update_xact = members[i].xid;
    6487             : #ifndef USE_ASSERT_CHECKING
    6488             : 
    6489             :             /*
    6490             :              * in an assert-enabled build, walk the whole array to ensure
    6491             :              * there's no other updater.
    6492             :              */
    6493         688 :             break;
    6494             : #endif
    6495             :         }
    6496             : 
    6497         688 :         pfree(members);
    6498             :     }
    6499             : 
    6500         688 :     return update_xact;
    6501             : }
    6502             : 
    6503             : /*
    6504             :  * HeapTupleGetUpdateXid
    6505             :  *      As above, but use a HeapTupleHeader
    6506             :  *
    6507             :  * See also HeapTupleHeaderGetUpdateXid, which can be used without previously
    6508             :  * checking the hint bits.
    6509             :  */
    6510             : TransactionId
    6511         672 : HeapTupleGetUpdateXid(HeapTupleHeader tuple)
    6512             : {
    6513        1344 :     return MultiXactIdGetUpdateXid(HeapTupleHeaderGetRawXmax(tuple),
    6514         672 :                                    tuple->t_infomask);
    6515             : }
    6516             : 
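A small sketch (not from this file) of how these entry points fit together
from a reader's side; it follows essentially the same test as the
HeapTupleHeaderGetUpdateXid macro mentioned above, resolving the "real"
updater Xid only when xmax is a multixact that is not lock-only.  The
function name is an illustrative assumption.

static TransactionId
example_updater_xid(HeapTupleHeader tuple)
{
    uint16      infomask = tuple->t_infomask;

    /* Multi with a true updater: dig the update Xid out of the members */
    if (!(infomask & HEAP_XMAX_INVALID) &&
        (infomask & HEAP_XMAX_IS_MULTI) &&
        !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
        return HeapTupleGetUpdateXid(tuple);

    /*
     * Otherwise the raw xmax is what callers look at: a plain updater Xid,
     * a lock-only xmax, or an invalid xmax.
     */
    return HeapTupleHeaderGetRawXmax(tuple);
}
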
    6517             : /*
    6518             :  * Does the given multixact conflict with the current transaction grabbing a
    6519             :  * tuple lock of the given strength?
    6520             :  *
    6521             :  * The passed infomask pairs up with the given multixact in the tuple header.
    6522             :  *
    6523             :  * If current_is_member is not NULL, it is set to 'true' if the current
    6524             :  * transaction is a member of the given multixact.
    6525             :  */
    6526             : static bool
    6527         152 : DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
    6528             :                         LockTupleMode lockmode, bool *current_is_member)
    6529             : {
    6530             :     int         nmembers;
    6531             :     MultiXactMember *members;
    6532         152 :     bool        result = false;
    6533         152 :     LOCKMODE    wanted = tupleLockExtraInfo[lockmode].hwlock;
    6534             : 
    6535         152 :     if (HEAP_LOCKED_UPGRADED(infomask))
    6536           0 :         return false;
    6537             : 
    6538         152 :     nmembers = GetMultiXactIdMembers(multi, &members, false,
    6539         152 :                                      HEAP_XMAX_IS_LOCKED_ONLY(infomask));
    6540         152 :     if (nmembers >= 0)
    6541             :     {
    6542             :         int         i;
    6543             : 
    6544         482 :         for (i = 0; i < nmembers; i++)
    6545             :         {
    6546             :             TransactionId memxid;
    6547             :             LOCKMODE    memlockmode;
    6548             : 
    6549         342 :             if (result && (current_is_member == NULL || *current_is_member))
    6550             :                 break;
    6551             : 
    6552         330 :             memlockmode = LOCKMODE_from_mxstatus(members[i].status);
    6553             : 
    6554             :             /* ignore members from current xact (but track their presence) */
    6555         330 :             memxid = members[i].xid;
    6556         330 :             if (TransactionIdIsCurrentTransactionId(memxid))
    6557             :             {
    6558         112 :                 if (current_is_member != NULL)
    6559          86 :                     *current_is_member = true;
    6560         112 :                 continue;
    6561             :             }
    6562         218 :             else if (result)
    6563          16 :                 continue;
    6564             : 
    6565             :             /* ignore members that don't conflict with the lock we want */
    6566         202 :             if (!DoLockModesConflict(memlockmode, wanted))
    6567         132 :                 continue;
    6568             : 
    6569          70 :             if (ISUPDATE_from_mxstatus(members[i].status))
    6570             :             {
    6571             :                 /* ignore aborted updaters */
    6572          34 :                 if (TransactionIdDidAbort(memxid))
    6573           2 :                     continue;
    6574             :             }
    6575             :             else
    6576             :             {
    6577             :                 /* ignore lockers-only that are no longer in progress */
    6578          36 :                 if (!TransactionIdIsInProgress(memxid))
    6579          10 :                     continue;
    6580             :             }
    6581             : 
    6582             :             /*
    6583             :              * Whatever remains are either live lockers that conflict with our
    6584             :              * wanted lock, or updaters that are not aborted.  Those conflict
    6585             :              * with what we want.  Set up to return true, but keep going to
    6586             :              * look for the current transaction among the multixact members,
    6587             :              * if needed.
    6588             :              */
    6589          58 :             result = true;
    6590             :         }
    6591         152 :         pfree(members);
    6592             :     }
    6593             : 
    6594         152 :     return result;
    6595             : }
    6596             : 
    6597             : /*
    6598             :  * Do_MultiXactIdWait
    6599             :  *      Actual implementation for the two functions below.
    6600             :  *
    6601             :  * 'multi', 'status' and 'infomask' indicate what to sleep on (the status is
    6602             :  * needed to ensure we only sleep on conflicting members, and the infomask is
    6603             :  * used to optimize multixact access in case it's a lock-only multi); 'nowait'
    6604             :  * indicates whether to use conditional lock acquisition, to allow callers to
    6605             :  * fail if lock is unavailable.  'rel', 'ctid' and 'oper' are used to set up
    6606             :  * context information for error messages.  'remaining', if not NULL, receives
    6607             :  * the number of members that are still running, including any (non-aborted)
    6608             :  * subtransactions of our own transaction.
    6609             :  *
    6610             :  * We do this by sleeping on each member using XactLockTableWait.  Any
    6611             :  * members that belong to the current backend are *not* waited for, however;
    6612             :  * this would not merely be useless but would lead to Assert failure inside
    6613             :  * XactLockTableWait.  By the time this returns, it is certain that all
    6614             :  * transactions *of other backends* that were members of the MultiXactId
    6615             :  * that conflict with the requested status are dead (and no new ones can have
    6616             :  * been added, since it is not legal to add members to an existing
    6617             :  * MultiXactId).
    6618             :  *
    6619             :  * But by the time we finish sleeping, someone else may have changed the Xmax
    6620             :  * of the containing tuple, so the caller needs to iterate on us somehow.
    6621             :  *
    6622             :  * Note that in case we return false, the number of remaining members is
    6623             :  * not to be trusted.
    6624             :  */
    6625             : static bool
    6626         110 : Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
    6627             :                    uint16 infomask, bool nowait,
    6628             :                    Relation rel, ItemPointer ctid, XLTW_Oper oper,
    6629             :                    int *remaining)
    6630             : {
    6631         110 :     bool        result = true;
    6632             :     MultiXactMember *members;
    6633             :     int         nmembers;
    6634         110 :     int         remain = 0;
    6635             : 
    6636             :     /* for pre-pg_upgrade tuples, no need to sleep at all */
    6637         220 :     nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
    6638         110 :         GetMultiXactIdMembers(multi, &members, false,
    6639         110 :                               HEAP_XMAX_IS_LOCKED_ONLY(infomask));
    6640             : 
    6641         110 :     if (nmembers >= 0)
    6642             :     {
    6643             :         int         i;
    6644             : 
    6645         356 :         for (i = 0; i < nmembers; i++)
    6646             :         {
    6647         254 :             TransactionId memxid = members[i].xid;
    6648         254 :             MultiXactStatus memstatus = members[i].status;
    6649             : 
    6650         254 :             if (TransactionIdIsCurrentTransactionId(memxid))
    6651             :             {
    6652          46 :                 remain++;
    6653          46 :                 continue;
    6654             :             }
    6655             : 
    6656         208 :             if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
    6657         208 :                                      LOCKMODE_from_mxstatus(status)))
    6658             :             {
    6659          40 :                 if (remaining && TransactionIdIsInProgress(memxid))
    6660          12 :                     remain++;
    6661          40 :                 continue;
    6662             :             }
    6663             : 
    6664             :             /*
    6665             :              * This member conflicts with our multi, so we have to sleep (or
    6666             :              * return failure, if asked to avoid waiting).
    6667             :              *
    6668             :              * Note that we don't set up an error context callback ourselves,
    6669             :              * but instead we pass the info down to XactLockTableWait.  This
    6670             :              * might seem a bit wasteful because the context is set up and
    6671             :              * torn down for each member of the multixact, but in reality it
    6672             :              * should be barely noticeable, and it avoids duplicate code.
    6673             :              */
    6674         168 :             if (nowait)
    6675             :             {
    6676           8 :                 result = ConditionalXactLockTableWait(memxid);
    6677           8 :                 if (!result)
    6678           8 :                     break;
    6679             :             }
    6680             :             else
    6681         160 :                 XactLockTableWait(memxid, rel, ctid, oper);
    6682             :         }
    6683             : 
    6684         110 :         pfree(members);
    6685             :     }
    6686             : 
    6687         110 :     if (remaining)
    6688          16 :         *remaining = remain;
    6689             : 
    6690         110 :     return result;
    6691             : }
    6692             : 
    6693             : /*
    6694             :  * MultiXactIdWait
    6695             :  *      Sleep on a MultiXactId.
    6696             :  *
    6697             :  * By the time we finish sleeping, someone else may have changed the Xmax
    6698             :  * of the containing tuple, so the caller needs to iterate on us somehow.
    6699             :  *
    6700             :  * We return (in *remaining, if not NULL) the number of members that are still
    6701             :  * running, including any (non-aborted) subtransactions of our own transaction.
    6702             :  */
    6703             : static void
    6704         102 : MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
    6705             :                 Relation rel, ItemPointer ctid, XLTW_Oper oper,
    6706             :                 int *remaining)
    6707             : {
    6708         102 :     (void) Do_MultiXactIdWait(multi, status, infomask, false,
    6709             :                               rel, ctid, oper, remaining);
    6710         102 : }
    6711             : 
    6712             : /*
    6713             :  * ConditionalMultiXactIdWait
    6714             :  *      As above, but only lock if we can get the lock without blocking.
    6715             :  *
    6716             :  * By the time we finish sleeping, someone else may have changed the Xmax
    6717             :  * of the containing tuple, so the caller needs to iterate on us somehow.
    6718             :  *
    6719             :  * If the multixact is now all gone, return true.  Returns false if some
    6720             :  * transactions might still be running.
    6721             :  *
    6722             :  * We return (in *remaining, if not NULL) the number of members that are still
    6723             :  * running, including any (non-aborted) subtransactions of our own transaction.
    6724             :  */
    6725             : static bool
    6726           8 : ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
    6727             :                            uint16 infomask, Relation rel, int *remaining)
    6728             : {
    6729           8 :     return Do_MultiXactIdWait(multi, status, infomask, true,
    6730             :                               rel, NULL, XLTW_None, remaining);
    6731             : }
    6732             : 
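/*
 * Illustrative sketch, not part of the instrumented heapam.c source (hence
 * no coverage counts): how a caller might choose between the two wrappers
 * above.  The variables mxact, status, infomask, relation, tuple and nowait
 * are assumed to be provided by the surrounding tuple-locking code;
 * XLTW_Lock is one of the existing XLTW_Oper values.
 */
if (nowait)
{
    if (!ConditionalMultiXactIdWait(mxact, status, infomask,
                                    relation, NULL))
        ereport(ERROR,
                (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
                 errmsg("could not obtain lock on row in relation \"%s\"",
                        RelationGetRelationName(relation))));
}
else
    MultiXactIdWait(mxact, status, infomask,
                    relation, &tuple->t_self, XLTW_Lock, NULL);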
    6733             : /*
    6734             :  * heap_tuple_needs_eventual_freeze
    6735             :  *
    6736             :  * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
    6737             :  * will eventually require freezing.  Similar to heap_tuple_needs_freeze,
    6738             :  * but there's no cutoff, since we're trying to figure out whether freezing
    6739             :  * will ever be needed, not whether it's needed now.
    6740             :  */
    6741             : bool
    6742      254462 : heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
    6743             : {
    6744             :     TransactionId xid;
    6745             : 
    6746             :     /*
    6747             :      * If xmin is a normal transaction ID, this tuple is definitely not
    6748             :      * frozen.
    6749             :      */
    6750      254462 :     xid = HeapTupleHeaderGetXmin(tuple);
    6751      254462 :     if (TransactionIdIsNormal(xid))
    6752        3020 :         return true;
    6753             : 
    6754             :     /*
    6755             :      * If xmax is a valid xact or multixact, this tuple is also not frozen.
    6756             :      */
    6757      251442 :     if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
    6758             :     {
    6759             :         MultiXactId multi;
    6760             : 
    6761           0 :         multi = HeapTupleHeaderGetRawXmax(tuple);
    6762           0 :         if (MultiXactIdIsValid(multi))
    6763           0 :             return true;
    6764             :     }
    6765             :     else
    6766             :     {
    6767      251442 :         xid = HeapTupleHeaderGetRawXmax(tuple);
    6768      251442 :         if (TransactionIdIsNormal(xid))
    6769          10 :             return true;
    6770             :     }
    6771             : 
    6772      251432 :     if (tuple->t_infomask & HEAP_MOVED)
    6773             :     {
    6774           0 :         xid = HeapTupleHeaderGetXvac(tuple);
    6775           0 :         if (TransactionIdIsNormal(xid))
    6776           0 :             return true;
    6777             :     }
    6778             : 
    6779      251432 :     return false;
    6780             : }
    6781             : 
    6782             : /*
    6783             :  * heap_tuple_needs_freeze
    6784             :  *
    6785             :  * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
    6786             :  * are older than the specified cutoff XID or MultiXactId.  If so, return true.
    6787             :  *
    6788             :  * It doesn't matter whether the tuple is alive or dead; we are checking
    6789             :  * to see if a tuple needs to be removed or frozen to avoid wraparound.
    6790             :  *
    6791             :  * NB: Cannot rely on hint bits here, they might not be set after a crash or
    6792             :  * on a standby.
    6793             :  */
    6794             : bool
    6795          42 : heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
    6796             :                         MultiXactId cutoff_multi, Buffer buf)
    6797             : {
    6798             :     TransactionId xid;
    6799             : 
    6800          42 :     xid = HeapTupleHeaderGetXmin(tuple);
    6801          84 :     if (TransactionIdIsNormal(xid) &&
    6802          42 :         TransactionIdPrecedes(xid, cutoff_xid))
    6803           0 :         return true;
    6804             : 
    6805             :     /*
    6806             :      * The considerations for multixacts are complicated; look at
    6807             :      * heap_prepare_freeze_tuple for justifications.  This routine had better
    6808             :      * be in sync with that one!
    6809             :      */
    6810          42 :     if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
    6811             :     {
    6812             :         MultiXactId multi;
    6813             : 
    6814           0 :         multi = HeapTupleHeaderGetRawXmax(tuple);
    6815           0 :         if (!MultiXactIdIsValid(multi))
    6816             :         {
    6817             :             /* no xmax set, ignore */
    6818             :             ;
    6819             :         }
    6820           0 :         else if (HEAP_LOCKED_UPGRADED(tuple->t_infomask))
    6821           0 :             return true;
    6822           0 :         else if (MultiXactIdPrecedes(multi, cutoff_multi))
    6823           0 :             return true;
    6824             :         else
    6825             :         {
    6826             :             MultiXactMember *members;
    6827             :             int         nmembers;
    6828             :             int         i;
    6829             : 
    6830             :             /* need to check whether any member of the mxact is too old */
    6831             : 
    6832           0 :             nmembers = GetMultiXactIdMembers(multi, &members, false,
    6833           0 :                                              HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
    6834             : 
    6835           0 :             for (i = 0; i < nmembers; i++)
    6836             :             {
    6837           0 :                 if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
    6838             :                 {
    6839           0 :                     pfree(members);
    6840           0 :                     return true;
    6841             :                 }
    6842             :             }
    6843           0 :             if (nmembers > 0)
    6844           0 :                 pfree(members);
    6845             :         }
    6846             :     }
    6847             :     else
    6848             :     {
    6849          42 :         xid = HeapTupleHeaderGetRawXmax(tuple);
    6850          42 :         if (TransactionIdIsNormal(xid) &&
    6851           0 :             TransactionIdPrecedes(xid, cutoff_xid))
    6852           0 :             return true;
    6853             :     }
    6854             : 
    6855          42 :     if (tuple->t_infomask & HEAP_MOVED)
    6856             :     {
    6857           0 :         xid = HeapTupleHeaderGetXvac(tuple);
    6858           0 :         if (TransactionIdIsNormal(xid) &&
    6859           0 :             TransactionIdPrecedes(xid, cutoff_xid))
    6860           0 :             return true;
    6861             :     }
    6862             : 
    6863          42 :     return false;
    6864             : }
    6865             : 
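/*
 * Illustrative sketch, not part of the instrumented heapam.c source (hence
 * no coverage counts): a hypothetical per-page check built on
 * heap_tuple_needs_freeze() above, in the style of a VACUUM caller.  The
 * locked buffer "buf" and the cutoff_xid/cutoff_multi values are assumed
 * to come from the caller; the helper name is made up for illustration.
 */
static bool
page_has_tuple_needing_freeze(Buffer buf, TransactionId cutoff_xid,
                              MultiXactId cutoff_multi)
{
    Page        page = BufferGetPage(buf);
    OffsetNumber offnum,
                maxoff = PageGetMaxOffsetNumber(page);

    for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum++)
    {
        ItemId      itemid = PageGetItemId(page, offnum);
        HeapTupleHeader tupleheader;

        /* only normal items carry a tuple header worth inspecting */
        if (!ItemIdIsNormal(itemid))
            continue;

        tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
        if (heap_tuple_needs_freeze(tupleheader, cutoff_xid,
                                    cutoff_multi, buf))
            return true;
    }

    return false;
}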
    6866             : /*
    6867             :  * If 'tuple' contains any visible XID greater than latestRemovedXid,
    6868             :  * ratchet forwards latestRemovedXid to the greatest one found.
    6869             :  * This is used as the basis for generating Hot Standby conflicts, so
    6870             :  * if a tuple was never visible then removing it should not conflict
    6871             :  * with queries.
    6872             :  */
    6873             : void
    6874     1596198 : HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
    6875             :                                        TransactionId *latestRemovedXid)
    6876             : {
    6877     1596198 :     TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
    6878     1596198 :     TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
    6879     1596198 :     TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
    6880             : 
    6881     1596198 :     if (tuple->t_infomask & HEAP_MOVED)
    6882             :     {
    6883           0 :         if (TransactionIdPrecedes(*latestRemovedXid, xvac))
    6884           0 :             *latestRemovedXid = xvac;
    6885             :     }
    6886             : 
    6887             :     /*
    6888             :      * Ignore tuples inserted by an aborted transaction or if the tuple was
    6889             :      * updated/deleted by the inserting transaction.
    6890             :      *
    6891             :      * Look for a committed hint bit, or if no xmin bit is set, check clog.
    6892             :      * This needs to work on both master and standby, where it is used to
    6893             :      * assess btree delete records.
    6894             :      */
    6895     1596198 :     if (HeapTupleHeaderXminCommitted(tuple) ||
    6896       27834 :         (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
    6897             :     {
    6898     2954766 :         if (xmax != xmin &&
    6899     1384770 :             TransactionIdFollows(xmax, *latestRemovedXid))
    6900      118434 :             *latestRemovedXid = xmax;
    6901             :     }
    6902             : 
    6903             :     /* *latestRemovedXid may still be invalid at end */
    6904     1596198 : }
    6905             : 
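/*
 * Illustrative sketch, not part of the instrumented heapam.c source (hence
 * no coverage counts): how a removal path accumulates the conflict horizon
 * with the function above.  "tuples_to_remove" and "ntuples" are
 * hypothetical; each element is assumed to be a HeapTupleHeader about to
 * be removed.
 */
TransactionId latestRemovedXid = InvalidTransactionId;

for (int i = 0; i < ntuples; i++)
    HeapTupleHeaderAdvanceLatestRemovedXid(tuples_to_remove[i],
                                           &latestRemovedXid);

/* latestRemovedXid may still be invalid if no removed tuple was visible */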
    6906             : #ifdef USE_PREFETCH
    6907             : /*
    6908             :  * Helper function for heap_compute_xid_horizon_for_tuples.  Issue prefetch
    6909             :  * requests for the number of buffers indicated by prefetch_count.  The
    6910             :  * prefetch_state keeps track of all the buffers that we can prefetch and
    6911             :  * which ones have already been prefetched; each call to this function picks
    6912             :  * up where the previous call left off.
    6913             :  */
    6914             : static void
    6915       10910 : xid_horizon_prefetch_buffer(Relation rel,
    6916             :                             XidHorizonPrefetchState *prefetch_state,
    6917             :                             int prefetch_count)
    6918             : {
    6919       10910 :     BlockNumber cur_hblkno = prefetch_state->cur_hblkno;
    6920       10910 :     int         count = 0;
    6921             :     int         i;
    6922       10910 :     int         nitems = prefetch_state->nitems;
    6923       10910 :     ItemPointerData *tids = prefetch_state->tids;
    6924             : 
    6925       64248 :     for (i = prefetch_state->next_item;
    6926       53548 :          i < nitems && count < prefetch_count;
    6927       53338 :          i++)
    6928             :     {
    6929       53338 :         ItemPointer htid = &tids[i];
    6930             : 
    6931       53338 :         if (cur_hblkno == InvalidBlockNumber ||
    6932       49630 :             ItemPointerGetBlockNumber(htid) != cur_hblkno)
    6933             :         {
    6934        7202 :             cur_hblkno = ItemPointerGetBlockNumber(htid);
    6935        7202 :             PrefetchBuffer(rel, MAIN_FORKNUM, cur_hblkno);
    6936        7202 :             count++;
    6937             :         }
    6938             :     }
    6939             : 
    6940             :     /*
    6941             :      * Save the prefetch position so that next time we can continue from that
    6942             :      * position.
    6943             :      */
    6944       10910 :     prefetch_state->next_item = i;
    6945       10910 :     prefetch_state->cur_hblkno = cur_hblkno;
    6946       10910 : }
    6947             : #endif
    6948             : 
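/*
 * Standalone sketch, not part of the instrumented heapam.c source (hence no
 * coverage counts): the prefetch-distance idea built around
 * xid_horizon_prefetch_buffer() above, reduced to plain C.  A batch of
 * "distance" requests is issued up front, then one more per block actually
 * read, so roughly "distance" requests stay in flight.  prefetch_block()
 * and read_block() are hypothetical stand-ins for PrefetchBuffer() and
 * ReadBuffer(); this compiles with an ordinary C compiler.
 */
#include <stdio.h>

static void prefetch_block(int blkno) { printf("prefetch block %d\n", blkno); }
static void read_block(int blkno)     { printf("read block     %d\n", blkno); }

int
main(void)
{
    int         blocks[] = {11, 12, 13, 14, 15, 16, 17, 18};
    int         nblocks = 8;
    int         distance = 3;
    int         next_prefetch = 0;

    /* fill the pipeline up to the configured distance */
    while (next_prefetch < nblocks && next_prefetch < distance)
        prefetch_block(blocks[next_prefetch++]);

    for (int i = 0; i < nblocks; i++)
    {
        read_block(blocks[i]);

        /* keep the distance: one new prefetch per block consumed */
        if (next_prefetch < nblocks)
            prefetch_block(blocks[next_prefetch++]);
    }
    return 0;
}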
    6949             : /*
    6950             :  * Get the latestRemovedXid from the heap pages pointed at by the index
    6951             :  * tuples being deleted.
    6952             :  *
    6953             :  * We used to do this during recovery rather than on the primary, but that
    6954             :  * approach now appears inferior.  It meant that the master could generate
    6955             :  * a lot of work for the standby without any back-pressure to slow down the
    6956             :  * master, and it required the standby to have reached consistency, whereas
    6957             :  * we want to have correct information available even before that point.
    6958             :  *
    6959             :  * It's possible for this to generate a fair amount of I/O, since we may be
    6960             :  * deleting hundreds of tuples from a single index block.  To amortize that
    6961             :  * cost to some degree, this uses prefetching and combines repeat accesses to
    6962             :  * the same block.
    6963             :  */
    6964             : TransactionId
    6965        3708 : heap_compute_xid_horizon_for_tuples(Relation rel,
    6966             :                                     ItemPointerData *tids,
    6967             :                                     int nitems)
    6968             : {
    6969        3708 :     TransactionId latestRemovedXid = InvalidTransactionId;
    6970             :     BlockNumber hblkno;
    6971        3708 :     Buffer      buf = InvalidBuffer;
    6972             :     Page        hpage;
    6973             : #ifdef USE_PREFETCH
    6974             :     XidHorizonPrefetchState prefetch_state;
    6975             :     int         prefetch_distance;
    6976             : #endif
    6977             : 
    6978             :     /*
    6979             :      * Sort to avoid repeated lookups for the same page, and to make it more
    6980             :      * likely to access items in an efficient order. In particular, this
    6981             :      * ensures that if there are multiple pointers to the same page, they all
    6982             :      * get processed looking up and locking the page just once.
    6983             :      * get processed while looking up and locking the page just once.
    6984        3708 :     qsort((void *) tids, nitems, sizeof(ItemPointerData),
    6985             :           (int (*) (const void *, const void *)) ItemPointerCompare);
    6986             : 
    6987             : #ifdef USE_PREFETCH
    6988             :     /* Initialize prefetch state. */
    6989        3708 :     prefetch_state.cur_hblkno = InvalidBlockNumber;
    6990        3708 :     prefetch_state.next_item = 0;
    6991        3708 :     prefetch_state.nitems = nitems;
    6992        3708 :     prefetch_state.tids = tids;
    6993             : 
    6994             :     /*
    6995             :      * Compute the prefetch distance that we will attempt to maintain.
    6996             :      *
    6997             :      * Since the caller holds a buffer lock somewhere in rel, we'd better make
    6998             :      * sure that isn't a catalog relation before we call code that does
    6999             :      * syscache lookups, to avoid risk of deadlock.
    7000             :      */
    7001        3708 :     if (IsCatalogRelation(rel))
    7002        3632 :         prefetch_distance = maintenance_io_concurrency;
    7003             :     else
    7004             :         prefetch_distance =
    7005          76 :             get_tablespace_maintenance_io_concurrency(rel->rd_rel->reltablespace);
    7006             : 
    7007             :     /* Start prefetching. */
    7008        3708 :     xid_horizon_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
    7009             : #endif
    7010             : 
    7011             :     /* Iterate over all tids, and check their horizon */
    7012        3708 :     hblkno = InvalidBlockNumber;
    7013        3708 :     hpage = NULL;
    7014       57046 :     for (int i = 0; i < nitems; i++)
    7015             :     {
    7016       53338 :         ItemPointer htid = &tids[i];
    7017             :         ItemId      hitemid;
    7018             :         OffsetNumber hoffnum;
    7019             : 
    7020             :         /*
    7021             :          * Read heap buffer, but avoid refetching if it's the same block as
    7022             :          * required for the last tid.
    7023             :          */
    7024       53338 :         if (hblkno == InvalidBlockNumber ||
    7025       49630 :             ItemPointerGetBlockNumber(htid) != hblkno)
    7026             :         {
    7027             :             /* release old buffer */
    7028        7202 :             if (BufferIsValid(buf))
    7029             :             {
    7030        3494 :                 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    7031        3494 :                 ReleaseBuffer(buf);
    7032             :             }
    7033             : 
    7034        7202 :             hblkno = ItemPointerGetBlockNumber(htid);
    7035             : 
    7036        7202 :             buf = ReadBuffer(rel, hblkno);
    7037             : 
    7038             : #ifdef USE_PREFETCH
    7039             : 
    7040             :             /*
    7041             :              * To maintain the prefetch distance, prefetch one more page for
    7042             :              * each page we read.
    7043             :              */
    7044        7202 :             xid_horizon_prefetch_buffer(rel, &prefetch_state, 1);
    7045             : #endif
    7046             : 
    7047        7202 :             hpage = BufferGetPage(buf);
    7048             : 
    7049        7202 :             LockBuffer(buf, BUFFER_LOCK_SHARE);
    7050             :         }
    7051             : 
    7052       53338 :         hoffnum = ItemPointerGetOffsetNumber(htid);
    7053       53338 :         hitemid = PageGetItemId(hpage, hoffnum);
    7054             : 
    7055             :         /*
    7056             :          * Follow any redirections until we find something useful.
    7057             :          */
    7058       54418 :         while (ItemIdIsRedirected(hitemid))
    7059             :         {
    7060        1080 :             hoffnum = ItemIdGetRedirect(hitemid);
    7061        1080 :             hitemid = PageGetItemId(hpage, hoffnum);
    7062        1080 :             CHECK_FOR_INTERRUPTS();
    7063             :         }
    7064             : 
    7065             :         /*
    7066             :          * If the heap item has storage, then read the header and use that to
    7067             :          * set latestRemovedXid.
    7068             :          *
    7069             :          * Some LP_DEAD items may not be accessible, so we ignore them.
    7070             :          */
    7071       53338 :         if (ItemIdHasStorage(hitemid))
    7072             :         {
    7073             :             HeapTupleHeader htuphdr;
    7074             : 
    7075        9552 :             htuphdr = (HeapTupleHeader) PageGetItem(hpage, hitemid);
    7076             : 
    7077        9552 :             HeapTupleHeaderAdvanceLatestRemovedXid(htuphdr, &latestRemovedXid);
    7078             :         }
    7079       43786 :         else if (ItemIdIsDead(hitemid))
    7080             :         {
    7081             :             /*
    7082             :              * Conjecture: if hitemid is dead then it had xids before the xids
    7083             :              * marked on LP_NORMAL items. So we just ignore this item and move
    7084             :              * on to the next, for the purposes of calculating
    7085             :              * latestRemovedXid.
    7086             :              */
    7087             :         }
    7088             :         else
    7089             :             Assert(!ItemIdIsUsed(hitemid));
    7090             : 
    7091             :     }
    7092             : 
    7093        3708 :     if (BufferIsValid(buf))
    7094             :     {
    7095        3708 :         LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    7096        3708 :         ReleaseBuffer(buf);
    7097             :     }
    7098             : 
    7099             :     /*
    7100             :      * If all heap tuples were LP_DEAD then we will be returning
    7101             :      * InvalidTransactionId here, which avoids conflicts. This matches
    7102             :      * existing logic which assumes that LP_DEAD tuples must already be older
    7103             :      * than the latestRemovedXid on the cleanup record that set them as
    7104             :      * LP_DEAD, hence must already have generated a conflict.
    7105             :      */
    7106             : 
    7107        3708 :     return latestRemovedXid;
    7108             : }
    7109             : 
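/*
 * Standalone sketch, not part of the instrumented heapam.c source (hence no
 * coverage counts): the sort-then-deduplicate block access pattern used by
 * heap_compute_xid_horizon_for_tuples() above, reduced to plain C.  After
 * sorting, every run of items pointing at the same block is served by a
 * single read; read_block() is a hypothetical stand-in for ReadBuffer().
 */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int BlockNumber;
#define InvalidBlockNumber ((BlockNumber) 0xFFFFFFFF)

static int
cmp_block(const void *a, const void *b)
{
    BlockNumber ba = *(const BlockNumber *) a;
    BlockNumber bb = *(const BlockNumber *) b;

    return (ba > bb) - (ba < bb);
}

static void
read_block(BlockNumber blkno)
{
    printf("reading block %u\n", blkno);
}

int
main(void)
{
    BlockNumber blocks[] = {7, 3, 9, 3, 7, 3};
    int         nitems = 6;
    BlockNumber cur = InvalidBlockNumber;

    qsort(blocks, nitems, sizeof(BlockNumber), cmp_block);

    for (int i = 0; i < nitems; i++)
    {
        if (cur == InvalidBlockNumber || blocks[i] != cur)
        {
            cur = blocks[i];
            read_block(cur);    /* each distinct block is read just once */
        }
        /* per-item work against the current block would go here */
    }
    return 0;
}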
    7110             : /*
    7111             :  * Perform XLogInsert to register a heap cleanup info message. These
    7112             :  * messages are sent once per VACUUM and are required because
    7113             :  * of the phasing of removal operations during a lazy VACUUM.
    7114             :  * See comments for vacuum_log_cleanup_info().
    7115             :  */
    7116             : XLogRecPtr
    7117        1036 : log_heap_cleanup_info(RelFileNode rnode, TransactionId latestRemovedXid)
    7118             : {
    7119             :     xl_heap_cleanup_info xlrec;
    7120             :     XLogRecPtr  recptr;
    7121             : 
    7122        1036 :     xlrec.node = rnode;
    7123        1036 :     xlrec.latestRemovedXid = latestRemovedXid;
    7124             : 
    7125        1036 :     XLogBeginInsert();
    7126        1036 :     XLogRegisterData((char *) &xlrec, SizeOfHeapCleanupInfo);
    7127             : 
    7128        1036 :     recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_CLEANUP_INFO);
    7129             : 
    7130        1036 :     return recptr;
    7131             : }
    7132             : 
    7133             : /*
    7134             :  * Perform XLogInsert for a heap-clean operation.  Caller must already
    7135             :  * have modified the buffer and marked it dirty.
    7136             :  *
    7137             :  * Note: prior to Postgres 8.3, the entries in the nowunused[] array were
    7138             :  * zero-based tuple indexes.  Now they are one-based like other uses
    7139             :  * of OffsetNumber.
    7140             :  *
    7141             :  * We also include latestRemovedXid, which is the greatest XID present in
    7142             :  * the removed tuples. That allows recovery processing to cancel or wait
    7143             :  * for long standby queries that can still see these tuples.
    7144             :  */
    7145             : XLogRecPtr
    7146       97822 : log_heap_clean(Relation reln, Buffer buffer,
    7147             :                OffsetNumber *redirected, int nredirected,
    7148             :                OffsetNumber *nowdead, int ndead,
    7149             :                OffsetNumber *nowunused, int nunused,
    7150             :                TransactionId latestRemovedXid)
    7151             : {
    7152             :     xl_heap_clean xlrec;
    7153             :     XLogRecPtr  recptr;
    7154             : 
    7155             :     /* Caller should not call me on a non-WAL-logged relation */
    7156             :     Assert(RelationNeedsWAL(reln));
    7157             : 
    7158       97822 :     xlrec.latestRemovedXid = latestRemovedXid;
    7159       97822 :     xlrec.nredirected = nredirected;
    7160       97822 :     xlrec.ndead = ndead;
    7161             : 
    7162       97822 :     XLogBeginInsert();
    7163       97822 :     XLogRegisterData((char *) &xlrec, SizeOfHeapClean);
    7164             : 
    7165       97822 :     XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    7166             : 
    7167             :     /*
    7168             :      * The OffsetNumber arrays are not actually in the buffer, but we pretend
    7169             :      * that they are.  When XLogInsert stores the whole buffer, the offset
    7170             :      * arrays need not be stored too.  Note that even if all three arrays are
    7171             :      * empty, we want to expose the buffer as a candidate for whole-page
    7172             :      * storage, since this record type implies a defragmentation operation
    7173             :      * even if no line pointers changed state.
    7174             :      */
    7175       97822 :     if (nredirected > 0)
    7176       35598 :         XLogRegisterBufData(0, (char *) redirected,
    7177             :                             nredirected * sizeof(OffsetNumber) * 2);
    7178             : 
    7179       97822 :     if (ndead > 0)
    7180       48724 :         XLogRegisterBufData(0, (char *) nowdead,
    7181             :                             ndead * sizeof(OffsetNumber));
    7182             : 
    7183       97822 :     if (nunused > 0)
    7184       38202 :         XLogRegisterBufData(0, (char *) nowunused,
    7185             :                             nunused * sizeof(OffsetNumber));
    7186             : 
    7187       97822 :     recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_CLEAN);
    7188             : 
    7189       97822 :     return recptr;
    7190             : }
    7191             : 
    7192             : /*
    7193             :  * Perform XLogInsert for a heap-freeze operation.  Caller must have already
    7194             :  * modified the buffer and marked it dirty.
    7195             :  */
    7196             : XLogRecPtr
    7197       86238 : log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid,
    7198             :                 xl_heap_freeze_tuple *tuples, int ntuples)
    7199             : {
    7200             :     xl_heap_freeze_page xlrec;
    7201             :     XLogRecPtr  recptr;
    7202             : 
    7203             :     /* Caller should not call me on a non-WAL-logged relation */
    7204             :     Assert(RelationNeedsWAL(reln));
    7205             :     /* nor when there are no tuples to freeze */
    7206             :     Assert(ntuples > 0);
    7207             : 
    7208       86238 :     xlrec.cutoff_xid = cutoff_xid;
    7209       86238 :     xlrec.ntuples = ntuples;
    7210             : 
    7211       86238 :     XLogBeginInsert();
    7212       86238 :     XLogRegisterData((char *) &xlrec, SizeOfHeapFreezePage);
    7213             : 
    7214             :     /*
    7215             :      * The freeze plan array is not actually in the buffer, but pretend that
    7216             :      * it is.  When XLogInsert stores the whole buffer, the freeze plan need
    7217             :      * not be stored too.
    7218             :      */
    7219       86238 :     XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    7220       86238 :     XLogRegisterBufData(0, (char *) tuples,
    7221             :                         ntuples * sizeof(xl_heap_freeze_tuple));
    7222             : 
    7223       86238 :     recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_FREEZE_PAGE);
    7224             : 
    7225       86238 :     return recptr;
    7226             : }
    7227             : 
    7228             : /*
    7229             :  * Perform XLogInsert for a heap-visible operation.  'block' is the block
    7230             :  * being marked all-visible, and vm_buffer is the buffer containing the
    7231             :  * corresponding visibility map block.  Both should have already been modified
    7232             :  * and dirtied.
    7233             :  *
    7234             :  * If checksums are enabled, we also generate a full-page image of
    7235             :  * heap_buffer, if necessary.
    7236             :  */
    7237             : XLogRecPtr
    7238      177584 : log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
    7239             :                  TransactionId cutoff_xid, uint8 vmflags)
    7240             : {
    7241             :     xl_heap_visible xlrec;
    7242             :     XLogRecPtr  recptr;
    7243             :     uint8       flags;
    7244             : 
    7245             :     Assert(BufferIsValid(heap_buffer));
    7246             :     Assert(BufferIsValid(vm_buffer));
    7247             : 
    7248      177584 :     xlrec.cutoff_xid = cutoff_xid;
    7249      177584 :     xlrec.flags = vmflags;
    7250      177584 :     XLogBeginInsert();
    7251      177584 :     XLogRegisterData((char *) &xlrec, SizeOfHeapVisible);
    7252             : 
    7253      177584 :     XLogRegisterBuffer(0, vm_buffer, 0);
    7254             : 
    7255      177584 :     flags = REGBUF_STANDARD;
    7256      177584 :     if (!XLogHintBitIsNeeded())
    7257      176286 :         flags |= REGBUF_NO_IMAGE;
    7258      177584 :     XLogRegisterBuffer(1, heap_buffer, flags);
    7259             : 
    7260      177584 :     recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE);
    7261             : 
    7262      177584 :     return recptr;
    7263             : }
    7264             : 
    7265             : /*
    7266             :  * Perform XLogInsert for a heap-update operation.  Caller must already
    7267             :  * have modified the buffer(s) and marked them dirty.
    7268             :  */
    7269             : static XLogRecPtr
    7270      336988 : log_heap_update(Relation reln, Buffer oldbuf,
    7271             :                 Buffer newbuf, HeapTuple oldtup, HeapTuple newtup,
    7272             :                 HeapTuple old_key_tuple,
    7273             :                 bool all_visible_cleared, bool new_all_visible_cleared)
    7274             : {
    7275             :     xl_heap_update xlrec;
    7276             :     xl_heap_header xlhdr;
    7277             :     xl_heap_header xlhdr_idx;
    7278             :     uint8       info;
    7279             :     uint16      prefix_suffix[2];
    7280      336988 :     uint16      prefixlen = 0,
    7281      336988 :                 suffixlen = 0;
    7282             :     XLogRecPtr  recptr;
    7283      336988 :     Page        page = BufferGetPage(newbuf);
    7284      336988 :     bool        need_tuple_data = RelationIsLogicallyLogged(reln);
    7285             :     bool        init;
    7286             :     int         bufflags;
    7287             : 
    7288             :     /* Caller should not call me on a non-WAL-logged relation */
    7289             :     Assert(RelationNeedsWAL(reln));
    7290             : 
    7291      336988 :     XLogBeginInsert();
    7292             : 
    7293      336988 :     if (HeapTupleIsHeapOnly(newtup))
    7294      204414 :         info = XLOG_HEAP_HOT_UPDATE;
    7295             :     else
    7296      132574 :         info = XLOG_HEAP_UPDATE;
    7297             : 
    7298             :     /*
    7299             :      * If the old and new tuple are on the same page, we only need to log the
    7300             :      * parts of the new tuple that were changed.  That saves on the amount of
    7301             :      * WAL we need to write.  Currently, we just count any unchanged bytes in
    7302             :      * the beginning and end of the tuple.  That's quick to check, and
    7303             :      * perfectly covers the common case that only one field is updated.
    7304             :      *
    7305             :      * We could do this even if the old and new tuple are on different pages,
    7306             :      * but only if we don't make a full-page image of the old page, which is
    7307             :      * difficult to know in advance.  Also, if the old tuple is corrupt for
    7308             :      * some reason, it would allow the corruption to propagate to the new page,
    7309             :      * so it seems best to avoid that.  Under the general assumption that most
    7310             :      * updates tend to create the new tuple version on the same page, there
    7311             :      * isn't much to be gained by doing this across pages anyway.
    7312             :      *
    7313             :      * Skip this if we're taking a full-page image of the new page, as we
    7314             :      * don't include the new tuple in the WAL record in that case.  Also
    7315             :      * disable if wal_level='logical', as logical decoding needs to be able to
    7316             :      * read the new tuple in whole from the WAL record alone.
    7317             :      */
    7318      336988 :     if (oldbuf == newbuf && !need_tuple_data &&
    7319      218058 :         !XLogCheckBufferNeedsBackup(newbuf))
    7320             :     {
    7321      216924 :         char       *oldp = (char *) oldtup->t_data + oldtup->t_data->t_hoff;
    7322      216924 :         char       *newp = (char *) newtup->t_data + newtup->t_data->t_hoff;
    7323      216924 :         int         oldlen = oldtup->t_len - oldtup->t_data->t_hoff;
    7324      216924 :         int         newlen = newtup->t_len - newtup->t_data->t_hoff;
    7325             : 
    7326             :         /* Check for common prefix between old and new tuple */
    7327    20781628 :         for (prefixlen = 0; prefixlen < Min(oldlen, newlen); prefixlen++)
    7328             :         {
    7329    20726332 :             if (newp[prefixlen] != oldp[prefixlen])
    7330      161628 :                 break;
    7331             :         }
    7332             : 
    7333             :         /*
    7334             :          * Storing the length of the prefix takes 2 bytes, so we need to save
    7335             :          * at least 3 bytes or there's no point.
    7336             :          */
    7337      216924 :         if (prefixlen < 3)
    7338       41844 :             prefixlen = 0;
    7339             : 
    7340             :         /* Same for suffix */
    7341     4672744 :         for (suffixlen = 0; suffixlen < Min(oldlen, newlen) - prefixlen; suffixlen++)
    7342             :         {
    7343     4617206 :             if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
    7344      161386 :                 break;
    7345             :         }
    7346      216924 :         if (suffixlen < 3)
    7347       66216 :             suffixlen = 0;
    7348             :     }
    7349             : 
    7350             :     /* Prepare main WAL data chain */
    7351      336988 :     xlrec.flags = 0;
    7352      336988 :     if (all_visible_cleared)
    7353        1742 :         xlrec.flags |= XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED;
    7354      336988 :     if (new_all_visible_cleared)
    7355         788 :         xlrec.flags |= XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED;
    7356      336988 :     if (prefixlen > 0)
    7357      175080 :         xlrec.flags |= XLH_UPDATE_PREFIX_FROM_OLD;
    7358      336988 :     if (suffixlen > 0)
    7359      150708 :         xlrec.flags |= XLH_UPDATE_SUFFIX_FROM_OLD;
    7360      336988 :     if (need_tuple_data)
    7361             :     {
    7362       15618 :         xlrec.flags |= XLH_UPDATE_CONTAINS_NEW_TUPLE;
    7363       15618 :         if (old_key_tuple)
    7364             :         {
    7365         336 :             if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
    7366         106 :                 xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_TUPLE;
    7367             :             else
    7368         230 :                 xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_KEY;
    7369             :         }
    7370             :     }
    7371             : 
    7372             :     /* If new tuple is the single and first tuple on page... */
    7373             :     /* If the new tuple is the first and only tuple on the page... */
    7374        4120 :         PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
    7375             :     {
    7376        3592 :         info |= XLOG_HEAP_INIT_PAGE;
    7377        3592 :         init = true;
    7378             :     }
    7379             :     else
    7380      333396 :         init = false;
    7381             : 
    7382             :     /* Prepare WAL data for the old page */
    7383      336988 :     xlrec.old_offnum = ItemPointerGetOffsetNumber(&oldtup->t_self);
    7384      336988 :     xlrec.old_xmax = HeapTupleHeaderGetRawXmax(oldtup->t_data);
    7385      673976 :     xlrec.old_infobits_set = compute_infobits(oldtup->t_data->t_infomask,
    7386      336988 :                                               oldtup->t_data->t_infomask2);
    7387             : 
    7388             :     /* Prepare WAL data for the new page */
    7389      336988 :     xlrec.new_offnum = ItemPointerGetOffsetNumber(&newtup->t_self);
    7390      336988 :     xlrec.new_xmax = HeapTupleHeaderGetRawXmax(newtup->t_data);
    7391             : 
    7392      336988 :     bufflags = REGBUF_STANDARD;
    7393      336988 :     if (init)
    7394        3592 :         bufflags |= REGBUF_WILL_INIT;
    7395      336988 :     if (need_tuple_data)
    7396       15618 :         bufflags |= REGBUF_KEEP_DATA;
    7397             : 
    7398      336988 :     XLogRegisterBuffer(0, newbuf, bufflags);
    7399      336988 :     if (oldbuf != newbuf)
    7400      118250 :         XLogRegisterBuffer(1, oldbuf, REGBUF_STANDARD);
    7401             : 
    7402      336988 :     XLogRegisterData((char *) &xlrec, SizeOfHeapUpdate);
    7403             : 
    7404             :     /*
    7405             :      * Prepare WAL data for the new tuple.
    7406             :      */
    7407      336988 :     if (prefixlen > 0 || suffixlen > 0)
    7408             :     {
    7409      216444 :         if (prefixlen > 0 && suffixlen > 0)
    7410             :         {
    7411      109344 :             prefix_suffix[0] = prefixlen;
    7412      109344 :             prefix_suffix[1] = suffixlen;
    7413      109344 :             XLogRegisterBufData(0, (char *) &prefix_suffix, sizeof(uint16) * 2);
    7414             :         }
    7415      107100 :         else if (prefixlen > 0)
    7416             :         {
    7417       65736 :             XLogRegisterBufData(0, (char *) &prefixlen, sizeof(uint16));
    7418             :         }
    7419             :         else
    7420             :         {
    7421       41364 :             XLogRegisterBufData(0, (char *) &suffixlen, sizeof(uint16));
    7422             :         }
    7423             :     }
    7424             : 
    7425      336988 :     xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
    7426      336988 :     xlhdr.t_infomask = newtup->t_data->t_infomask;
    7427      336988 :     xlhdr.t_hoff = newtup->t_data->t_hoff;
    7428             :     Assert(SizeofHeapTupleHeader + prefixlen + suffixlen <= newtup->t_len);
    7429             : 
    7430             :     /*
    7431             :      * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
    7432             :      *
    7433             :      * The 'data' doesn't include the common prefix or suffix.
    7434             :      */
    7435      336988 :     XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
    7436      336988 :     if (prefixlen == 0)
    7437             :     {
    7438      323816 :         XLogRegisterBufData(0,
    7439      161908 :                             ((char *) newtup->t_data) + SizeofHeapTupleHeader,
    7440      161908 :                             newtup->t_len - SizeofHeapTupleHeader - suffixlen);
    7441             :     }
    7442             :     else
    7443             :     {
    7444             :         /*
    7445             :          * Have to write the null bitmap and data after the common prefix as
    7446             :          * two separate rdata entries.
    7447             :          */
    7448             :         /* bitmap [+ padding] [+ oid] */
    7449      175080 :         if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
    7450             :         {
    7451      350160 :             XLogRegisterBufData(0,
    7452      175080 :                                 ((char *) newtup->t_data) + SizeofHeapTupleHeader,
    7453      175080 :                                 newtup->t_data->t_hoff - SizeofHeapTupleHeader);
    7454             :         }
    7455             : 
    7456             :         /* data after common prefix */
    7457      350160 :         XLogRegisterBufData(0,
    7458      175080 :                             ((char *) newtup->t_data) + newtup->t_data->t_hoff + prefixlen,
    7459      175080 :                             newtup->t_len - newtup->t_data->t_hoff - prefixlen - suffixlen);
    7460             :     }
    7461             : 
    7462             :     /* We need to log a tuple identity */
    7463      336988 :     if (need_tuple_data && old_key_tuple)
    7464             :     {
    7465             :         /* don't really need this, but it's more comfy to decode */
    7466         336 :         xlhdr_idx.t_infomask2 = old_key_tuple->t_data->t_infomask2;
    7467         336 :         xlhdr_idx.t_infomask = old_key_tuple->t_data->t_infomask;
    7468         336 :         xlhdr_idx.t_hoff = old_key_tuple->t_data->t_hoff;
    7469             : 
    7470         336 :         XLogRegisterData((char *) &xlhdr_idx, SizeOfHeapHeader);
    7471             : 
    7472             :         /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
    7473         336 :         XLogRegisterData((char *) old_key_tuple->t_data + SizeofHeapTupleHeader,
    7474         336 :                          old_key_tuple->t_len - SizeofHeapTupleHeader);
    7475             :     }
    7476             : 
    7477             :     /* filtering by origin on a row level is much more efficient */
    7478      336988 :     XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
    7479             : 
    7480      336988 :     recptr = XLogInsert(RM_HEAP_ID, info);
    7481             : 
    7482      336988 :     return recptr;
    7483             : }
    7484             : 
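/*
 * Standalone sketch, not part of the instrumented heapam.c source (hence no
 * coverage counts): the prefix/suffix compression applied by
 * log_heap_update() above, reduced to plain C.  Only the bytes between the
 * shared prefix and shared suffix of the old and new tuple data need to go
 * into the WAL record; lengths below 3 are not worth the 2 bytes needed to
 * store them.
 */
#include <stdio.h>
#include <string.h>

#define Min(a, b) ((a) < (b) ? (a) : (b))

static void
delta_bounds(const char *oldp, int oldlen, const char *newp, int newlen,
             int *prefixlen, int *suffixlen)
{
    int         prefix = 0;
    int         suffix = 0;

    while (prefix < Min(oldlen, newlen) && oldp[prefix] == newp[prefix])
        prefix++;
    if (prefix < 3)
        prefix = 0;

    while (suffix < Min(oldlen, newlen) - prefix &&
           oldp[oldlen - suffix - 1] == newp[newlen - suffix - 1])
        suffix++;
    if (suffix < 3)
        suffix = 0;

    *prefixlen = prefix;
    *suffixlen = suffix;
}

int
main(void)
{
    const char *oldtup = "alice|1000|active";
    const char *newtup = "alice|2500|active";
    int         prefixlen,
                suffixlen;

    delta_bounds(oldtup, (int) strlen(oldtup), newtup, (int) strlen(newtup),
                 &prefixlen, &suffixlen);
    printf("log %d byte(s) of tuple data instead of %d\n",
           (int) strlen(newtup) - prefixlen - suffixlen,
           (int) strlen(newtup));
    return 0;
}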
    7485             : /*
    7486             :  * Perform XLogInsert of an XLOG_HEAP2_NEW_CID record
    7487             :  *
    7488             :  * This is only used in wal_level >= WAL_LEVEL_LOGICAL, and only for catalog
    7489             :  * tuples.
    7490             :  */
    7491             : static XLogRecPtr
    7492       28986 : log_heap_new_cid(Relation relation, HeapTuple tup)
    7493             : {
    7494             :     xl_heap_new_cid xlrec;
    7495             : 
    7496             :     XLogRecPtr  recptr;
    7497       28986 :     HeapTupleHeader hdr = tup->t_data;
    7498             : 
    7499             :     Assert(ItemPointerIsValid(&tup->t_self));
    7500             :     Assert(tup->t_tableOid != InvalidOid);
    7501             : 
    7502       28986 :     xlrec.top_xid = GetTopTransactionId();
    7503       28986 :     xlrec.target_node = relation->rd_node;
    7504       28986 :     xlrec.target_tid = tup->t_self;
    7505             : 
    7506             :     /*
    7507             :      * If the tuple got inserted & deleted in the same TX, we definitely have a
    7508             :      * combocid; set both cmin and cmax.
    7509             :      */
    7510       28986 :     if (hdr->t_infomask & HEAP_COMBOCID)
    7511             :     {
    7512             :         Assert(!(hdr->t_infomask & HEAP_XMAX_INVALID));
    7513             :         Assert(!HeapTupleHeaderXminInvalid(hdr));
    7514        3702 :         xlrec.cmin = HeapTupleHeaderGetCmin(hdr);
    7515        3702 :         xlrec.cmax = HeapTupleHeaderGetCmax(hdr);
    7516        3702 :         xlrec.combocid = HeapTupleHeaderGetRawCommandId(hdr);
    7517             :     }
    7518             :     /* No combocid, so only cmin or cmax can be set by this TX */
    7519             :     else
    7520             :     {
    7521             :         /*
    7522             :          * Tuple inserted.
    7523             :          *
    7524             :          * We need to check for LOCK ONLY because multixacts might be
    7525             :          * transferred to the new tuple in case of FOR KEY SHARE updates in
    7526             :          * transferred to the new tuple in case of FOR KEY SHARE updates, in
    7527             :          * inserted.
    7528             :          */
    7529       25284 :         if (hdr->t_infomask & HEAP_XMAX_INVALID ||
    7530        4930 :             HEAP_XMAX_IS_LOCKED_ONLY(hdr->t_infomask))
    7531             :         {
    7532       20356 :             xlrec.cmin = HeapTupleHeaderGetRawCommandId(hdr);
    7533       20356 :             xlrec.cmax = InvalidCommandId;
    7534             :         }
    7535             :         /* Tuple from a different tx updated or deleted. */
    7536             :         else
    7537             :         {
    7538        4928 :             xlrec.cmin = InvalidCommandId;
    7539        4928 :             xlrec.cmax = HeapTupleHeaderGetRawCommandId(hdr);
    7540             : 
    7541             :         }
    7542       25284 :         xlrec.combocid = InvalidCommandId;
    7543             :     }
    7544             : 
    7545             :     /*
    7546             :      * Note that we don't need to register the buffer here, because this
    7547             :      * operation does not modify the page. The insert/update/delete that
    7548             :      * called us certainly did, but that's WAL-logged separately.
    7549             :      */
    7550       28986 :     XLogBeginInsert();
    7551       28986 :     XLogRegisterData((char *) &xlrec, SizeOfHeapNewCid);
    7552             : 
    7553             :     /* will be looked at irrespective of origin */
    7554             : 
    7555       28986 :     recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_NEW_CID);
    7556             : 
    7557       28986 :     return recptr;
    7558             : }
    7559             : 
    7560             : /*
    7561             :  * Build a heap tuple representing the configured REPLICA IDENTITY to stand in
    7562             :  * for the old tuple in an UPDATE or DELETE.
    7563             :  *
    7564             :  * Returns NULL if there's no need to log an identity or if there's no suitable
    7565             :  * key defined.
    7566             :  *
    7567             :  * key_changed should be false if caller knows that no replica identity
    7568             :  * columns changed value.  It's always true in the DELETE case.
    7569             :  *
    7570             :  * *copy is set to true if the returned tuple is a modified copy rather than
    7571             :  * the same tuple that was passed in.
    7572             :  */
    7573             : static HeapTuple
    7574     1957900 : ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed,
    7575             :                        bool *copy)
    7576             : {
    7577     1957900 :     TupleDesc   desc = RelationGetDescr(relation);
    7578     1957900 :     char        replident = relation->rd_rel->relreplident;
    7579             :     Bitmapset  *idattrs;
    7580             :     HeapTuple   key_tuple;
    7581             :     bool        nulls[MaxHeapAttributeNumber];
    7582             :     Datum       values[MaxHeapAttributeNumber];
    7583             : 
    7584     1957900 :     *copy = false;
    7585             : 
    7586     1957900 :     if (!RelationIsLogicallyLogged(relation))
    7587     1918790 :         return NULL;
    7588             : 
    7589       39110 :     if (replident == REPLICA_IDENTITY_NOTHING)
    7590         468 :         return NULL;
    7591             : 
    7592       38642 :     if (replident == REPLICA_IDENTITY_FULL)
    7593             :     {
    7594             :         /*
    7595             :          * When logging the entire old tuple, it very well could contain
    7596             :          * toasted columns. If so, force them to be inlined.
    7597             :          */
    7598         510 :         if (HeapTupleHasExternal(tp))
    7599             :         {
    7600           8 :             *copy = true;
    7601           8 :             tp = toast_flatten_tuple(tp, desc);
    7602             :         }
    7603         510 :         return tp;
    7604             :     }
    7605             : 
    7606             :     /* if the key hasn't changed and we're only logging the key, we're done */
    7607       38132 :     if (!key_changed)
    7608       15282 :         return NULL;
    7609             : 
    7610             :     /* find out the replica identity columns */
    7611       22850 :     idattrs = RelationGetIndexAttrBitmap(relation,
    7612             :                                          INDEX_ATTR_BITMAP_IDENTITY_KEY);
    7613             : 
    7614             :     /*
    7615             :      * If there's no defined replica identity columns, treat as !key_changed.
    7616             :      * (This case should not be reachable from heap_update, since that should
    7617             :      * calculate key_changed accurately.  But heap_delete just passes constant
    7618             :      * true for key_changed, so we can hit this case in deletes.)
    7619             :      */
    7620       22850 :     if (bms_is_empty(idattrs))
    7621       12030 :         return NULL;
    7622             : 
    7623             :     /*
    7624             :      * Construct a new tuple containing only the replica identity columns,
    7625             :      * with nulls elsewhere.  While we're at it, assert that the replica
    7626             :      * identity columns aren't null.
    7627             :      */
    7628       10820 :     heap_deform_tuple(tp, desc, values, nulls);
    7629             : 
    7630       32306 :     for (int i = 0; i < desc->natts; i++)
    7631             :     {
    7632       21486 :         if (bms_is_member(i + 1 - FirstLowInvalidHeapAttributeNumber,
    7633             :                           idattrs))
    7634             :             Assert(!nulls[i]);
    7635             :         else
    7636       10666 :             nulls[i] = true;
    7637             :     }
    7638             : 
    7639       10820 :     key_tuple = heap_form_tuple(desc, values, nulls);
    7640       10820 :     *copy = true;
    7641             : 
    7642       10820 :     bms_free(idattrs);
    7643             : 
    7644             :     /*
    7645             :      * If the tuple, which by now only contains indexed columns, still has
    7646             :      * toasted columns, force them to be inlined. This is somewhat unlikely
    7647             :      * since there are limits on the size of indexed columns, so we don't
    7648             :      * duplicate toast_flatten_tuple()'s functionality in the above loop over
    7649             :      * the indexed columns, even if it would be more efficient.
    7650             :      */
    7651       10820 :     if (HeapTupleHasExternal(key_tuple))
    7652             :     {
    7653           4 :         HeapTuple   oldtup = key_tuple;
    7654             : 
    7655           4 :         key_tuple = toast_flatten_tuple(oldtup, desc);
    7656           4 :         heap_freetuple(oldtup);
    7657             :     }
    7658             : 
    7659       10820 :     return key_tuple;
    7660             : }
    7661             : 
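/*
 * Illustrative sketch, not part of the instrumented heapam.c source (hence
 * no coverage counts): how a delete path might use ExtractReplicaIdentity()
 * above.  The variables relation and tp (the old tuple, a HeapTupleData),
 * and the WAL-logging step itself, are assumed to be provided by the
 * surrounding code; for DELETE, key_changed is simply passed as true.
 */
{
    HeapTuple   old_key_tuple;
    bool        old_key_copied = false;

    old_key_tuple = ExtractReplicaIdentity(relation, &tp, true,
                                           &old_key_copied);

    /* ... build and insert the WAL record, including old_key_tuple ... */

    if (old_key_copied)
        heap_freetuple(old_key_tuple);
}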
    7662             : /*
    7663             :  * Handles CLEANUP_INFO
    7664             :  */
    7665             : static void
    7666           0 : heap_xlog_cleanup_info(XLogReaderState *record)
    7667             : {
    7668           0 :     xl_heap_cleanup_info *xlrec = (xl_heap_cleanup_info *) XLogRecGetData(record);
    7669             : 
    7670           0 :     if (InHotStandby)
    7671           0 :         ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, xlrec->node);
    7672             : 
    7673             :     /*
    7674             :      * Actual operation is a no-op. Record type exists to provide a means for
    7675             :      * conflict processing to occur before we begin index vacuum actions.  See
    7676             :      * vacuumlazy.c and also the comments in btvacuumpage().
    7677             :      */
    7678             : 
    7679             :     /* Backup blocks are not used in cleanup_info records */
    7680             :     Assert(!XLogRecHasAnyBlockRefs(record));
    7681           0 : }
    7682             : 
    7683             : /*
    7684             :  * Handles XLOG_HEAP2_CLEAN record type
    7685             :  */
    7686             : static void
    7687        1858 : heap_xlog_clean(XLogReaderState *record)
    7688             : {
    7689        1858 :     XLogRecPtr  lsn = record->EndRecPtr;
    7690        1858 :     xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
    7691             :     Buffer      buffer;
    7692             :     RelFileNode rnode;
    7693             :     BlockNumber blkno;
    7694             :     XLogRedoAction action;
    7695             : 
    7696        1858 :     XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
    7697             : 
    7698             :     /*
    7699             :      * We're about to remove tuples. In Hot Standby mode, ensure that there
    7700             :      * are no queries running for which the removed tuples are still visible.
    7701             :      *
    7702             :      * Not all HEAP2_CLEAN records remove tuples with xids, so we only want to
    7703             :      * conflict on the records that cause MVCC failures for user queries. If
    7704             :      * latestRemovedXid is invalid, skip conflict processing.
    7705             :      */
    7706        1858 :     if (InHotStandby && TransactionIdIsValid(xlrec->latestRemovedXid))
    7707        1116 :         ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rnode);
    7708             : 
    7709             :     /*
    7710             :      * If we have a full-page image, restore it (using a cleanup lock) and
    7711             :      * we're done.
    7712             :      */
    7713        1858 :     action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true,
    7714             :                                            &buffer);
    7715        1858 :     if (action == BLK_NEEDS_REDO)
    7716             :     {
    7717        1856 :         Page        page = (Page) BufferGetPage(buffer);
    7718             :         OffsetNumber *end;
    7719             :         OffsetNumber *redirected;
    7720             :         OffsetNumber *nowdead;
    7721             :         OffsetNumber *nowunused;
    7722             :         int         nredirected;
    7723             :         int         ndead;
    7724             :         int         nunused;
    7725             :         Size        datalen;
    7726             : 
    7727        1856 :         redirected = (OffsetNumber *) XLogRecGetBlockData(record, 0, &datalen);
    7728             : 
    7729        1856 :         nredirected = xlrec->nredirected;
    7730        1856 :         ndead = xlrec->ndead;
    7731        1856 :         end = (OffsetNumber *) ((char *) redirected + datalen);
    7732        1856 :         nowdead = redirected + (nredirected * 2);
    7733        1856 :         nowunused = nowdead + ndead;
    7734        1856 :         nunused = (end - nowunused);
    7735             :         Assert(nunused >= 0);
    7736             : 
    7737             :         /* Update all line pointers per the record, and repair fragmentation */
    7738        1856 :         heap_page_prune_execute(buffer,
    7739             :                                 redirected, nredirected,
    7740             :                                 nowdead, ndead,
    7741             :                                 nowunused, nunused);
    7742             : 
    7743             :         /*
    7744             :          * Note: we don't worry about updating the page's prunability hints.
    7745             :          * At worst this will cause an extra prune cycle to occur soon.
    7746             :          */
    7747             : 
    7748        1856 :         PageSetLSN(page, lsn);
    7749        1856 :         MarkBufferDirty(buffer);
    7750             :     }
    7751             : 
    7752        1858 :     if (BufferIsValid(buffer))
    7753             :     {
    7754        1858 :         Size        freespace = PageGetHeapFreeSpace(BufferGetPage(buffer));
    7755             : 
    7756        1858 :         UnlockReleaseBuffer(buffer);
    7757             : 
    7758             :         /*
    7759             :          * After cleaning records from a page, it's useful to update the FSM
    7760             :          * about it, as that may make the page a target for insertions later,
    7761             :          * even if vacuum decides not to visit it (which is possible if the
    7762             :          * page gets marked all-visible).
    7763             :          *
    7764             :          * Do this regardless of a full-page image being applied, since the
    7765             :          * FSM data is not in the page anyway.
    7766             :          */
    7767        1858 :         XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
    7768             :     }
    7769        1858 : }
    7770             : 
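
The pointer arithmetic in the redo branch above carves three variable-length arrays out of one flat block-data buffer: nredirected pairs of offsets, then ndead offsets, and whatever remains is the unused array. A standalone sketch of the same layout, using plain uint16_t in place of OffsetNumber; the counts and offsets below are invented for illustration.

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* Flat block data: 1 redirect pair, 2 dead items, the rest unused. */
        uint16_t    block_data[] = {3, 7, 4, 5, 9};
        int         nredirected = 1;
        int         ndead = 2;

        uint16_t   *redirected = block_data;
        uint16_t   *end = block_data + sizeof(block_data) / sizeof(block_data[0]);
        uint16_t   *nowdead = redirected + (nredirected * 2);
        uint16_t   *nowunused = nowdead + ndead;
        int         nunused = (int) (end - nowunused);

        printf("redirect %u -> %u\n", (unsigned) redirected[0], (unsigned) redirected[1]);
        printf("dead: %u, %u\n", (unsigned) nowdead[0], (unsigned) nowdead[1]);
        printf("unused: %d item(s), first = %u\n", nunused, (unsigned) nowunused[0]);
        return 0;
    }
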
    7771             : /*
    7772             :  * Replay XLOG_HEAP2_VISIBLE record.
    7773             :  *
    7774             :  * The critical integrity requirement here is that we must never end up with
    7775             :  * a situation where the visibility map bit is set, and the page-level
    7776             :  * PD_ALL_VISIBLE bit is clear.  If that were to occur, then a subsequent
    7777             :  * page modification would fail to clear the visibility map bit.
    7778             :  */
    7779             : static void
    7780         480 : heap_xlog_visible(XLogReaderState *record)
    7781             : {
    7782         480 :     XLogRecPtr  lsn = record->EndRecPtr;
    7783         480 :     xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record);
    7784         480 :     Buffer      vmbuffer = InvalidBuffer;
    7785             :     Buffer      buffer;
    7786             :     Page        page;
    7787             :     RelFileNode rnode;
    7788             :     BlockNumber blkno;
    7789             :     XLogRedoAction action;
    7790             : 
    7791         480 :     XLogRecGetBlockTag(record, 1, &rnode, NULL, &blkno);
    7792             : 
    7793             :     /*
    7794             :      * If there are any Hot Standby transactions running that have an xmin
    7795             :      * horizon old enough that this page isn't all-visible for them, they
    7796             :      * might incorrectly decide that an index-only scan can skip a heap fetch.
    7797             :      *
    7798             :      * NB: It might be better to throw some kind of "soft" conflict here that
    7799             :      * forces any index-only scan that is in flight to perform heap fetches,
    7800             :      * rather than killing the transaction outright.
    7801             :      */
    7802         480 :     if (InHotStandby)
    7803         222 :         ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rnode);
    7804             : 
    7805             :     /*
    7806             :      * Read the heap page, if it still exists. If the heap file has been
    7807             :      * dropped or truncated later in recovery, we don't need to update the
    7808             :      * page, but we'd better still update the visibility map.
    7809             :      */
    7810         480 :     action = XLogReadBufferForRedo(record, 1, &buffer);
    7811         480 :     if (action == BLK_NEEDS_REDO)
    7812             :     {
    7813             :         /*
    7814             :          * We don't bump the LSN of the heap page when setting the visibility
    7815             :          * map bit (unless checksums or wal_log_hints is enabled, in which
    7816             :          * case we must), because that would generate an unworkable volume of
    7817             :          * full-page writes.  This exposes us to torn page hazards, but since
    7818             :          * we're not inspecting the existing page contents in any way, we
    7819             :          * don't care.
    7820             :          *
    7821             :          * However, all operations that clear the visibility map bit *do* bump
    7822             :          * the LSN, and those operations will only be replayed if the XLOG LSN
    7823             :          * follows the page LSN.  Thus, if the page LSN has advanced past our
    7824             :          * XLOG record's LSN, we mustn't mark the page all-visible, because
    7825             :          * the subsequent update won't be replayed to clear the flag.
    7826             :          */
    7827         384 :         page = BufferGetPage(buffer);
    7828             : 
    7829         384 :         PageSetAllVisible(page);
    7830             : 
    7831         384 :         MarkBufferDirty(buffer);
    7832             :     }
    7833             :     else if (action == BLK_RESTORED)
    7834             :     {
    7835             :         /*
    7836             :          * If heap block was backed up, we already restored it and there's
    7837             :          * nothing more to do. (This can only happen with checksums or
    7838             :          * wal_log_hints enabled.)
    7839             :          */
    7840             :     }
    7841             : 
    7842         480 :     if (BufferIsValid(buffer))
    7843             :     {
    7844         390 :         Size        space = PageGetFreeSpace(BufferGetPage(buffer));
    7845             : 
    7846         390 :         UnlockReleaseBuffer(buffer);
    7847             : 
    7848             :         /*
    7849             :          * Since FSM is not WAL-logged and only updated heuristically, it
    7850             :          * easily becomes stale in standbys.  If the standby is later promoted
    7851             :          * and runs VACUUM, it will skip updating individual free space
    7852             :          * figures for pages that became all-visible (or all-frozen, depending
    7853             :          * on the vacuum mode), which is troublesome when FreeSpaceMapVacuum
    7854             :          * propagates overly optimistic free space values to upper FSM layers;
    7855             :          * later inserters try to use such pages only to find out that they
    7856             :          * are unusable.  This can cause long stalls when there are many such
    7857             :          * pages.
    7858             :          *
    7859             :          * Forestall those problems by updating FSM's idea about a page that
    7860             :          * is becoming all-visible or all-frozen.
    7861             :          *
    7862             :          * Do this regardless of a full-page image being applied, since the
    7863             :          * FSM data is not in the page anyway.
    7864             :          */
    7865         390 :         if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
    7866         390 :             XLogRecordPageWithFreeSpace(rnode, blkno, space);
    7867             :     }
    7868             : 
    7869             :     /*
    7870             :      * Even if we skipped the heap page update due to the LSN interlock, it's
    7871             :      * still safe to update the visibility map.  Any WAL record that clears
    7872             :      * the visibility map bit does so before checking the page LSN, so any
    7873             :      * bits that need to be cleared will still be cleared.
    7874             :      */
    7875         480 :     if (XLogReadBufferForRedoExtended(record, 0, RBM_ZERO_ON_ERROR, false,
    7876             :                                       &vmbuffer) == BLK_NEEDS_REDO)
    7877             :     {
    7878         452 :         Page        vmpage = BufferGetPage(vmbuffer);
    7879             :         Relation    reln;
    7880             : 
    7881             :         /* initialize the page if it was read as zeros */
    7882         452 :         if (PageIsNew(vmpage))
    7883           0 :             PageInit(vmpage, BLCKSZ, 0);
    7884             : 
    7885             :         /*
    7886             :          * XLogReadBufferForRedoExtended locked the buffer. But
    7887             :          * visibilitymap_set will handle locking itself.
    7888             :          */
    7889         452 :         LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
    7890             : 
    7891         452 :         reln = CreateFakeRelcacheEntry(rnode);
    7892         452 :         visibilitymap_pin(reln, blkno, &vmbuffer);
    7893             : 
    7894             :         /*
    7895             :          * Don't set the bit if replay has already passed this point.
    7896             :          *
    7897             :          * It might be safe to do this unconditionally; if replay has passed
    7898             :          * this point, we'll replay at least as far this time as we did
    7899             :          * before, and if this bit needs to be cleared, the record responsible
    7900             :          * for doing so should be replayed again and will clear it.  For right
    7901             :          * now, out of an abundance of conservatism, we use the same test here
    7902             :          * that we did for the heap page.  If this results in a dropped bit, no
    7903             :          * real harm is done; the next VACUUM will fix it.
    7904             :          */
    7905         452 :         if (lsn > PageGetLSN(vmpage))
    7906         452 :             visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
    7907         452 :                               xlrec->cutoff_xid, xlrec->flags);
    7908             : 
    7909         452 :         ReleaseBuffer(vmbuffer);
    7910         452 :         FreeFakeRelcacheEntry(reln);
    7911             :     }
    7912          28 :     else if (BufferIsValid(vmbuffer))
    7913          28 :         UnlockReleaseBuffer(vmbuffer);
    7914         480 : }
    7915             : 
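
The "page LSN interlock" used twice in the routine above boils down to one rule: apply a change only when the record's LSN is ahead of the page's LSN, so that re-replaying an older record can never overwrite the effect of a newer one. A tiny sketch of that rule with plain 64-bit values standing in for XLogRecPtr; the LSNs are made up.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Apply a redo change only if the record's LSN is ahead of the page's LSN. */
    static bool
    needs_redo(uint64_t record_lsn, uint64_t page_lsn)
    {
        return record_lsn > page_lsn;
    }

    int
    main(void)
    {
        uint64_t    page_lsn = 0x1000;

        printf("older record: %s\n", needs_redo(0x0800, page_lsn) ? "apply" : "skip");
        printf("newer record: %s\n", needs_redo(0x2000, page_lsn) ? "apply" : "skip");
        return 0;
    }
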
    7916             : /*
    7917             :  * Replay XLOG_HEAP2_FREEZE_PAGE records
    7918             :  */
    7919             : static void
    7920           0 : heap_xlog_freeze_page(XLogReaderState *record)
    7921             : {
    7922           0 :     XLogRecPtr  lsn = record->EndRecPtr;
    7923           0 :     xl_heap_freeze_page *xlrec = (xl_heap_freeze_page *) XLogRecGetData(record);
    7924           0 :     TransactionId cutoff_xid = xlrec->cutoff_xid;
    7925             :     Buffer      buffer;
    7926             :     int         ntup;
    7927             : 
    7928             :     /*
    7929             :      * In Hot Standby mode, ensure that there are no queries running which
    7930             :      * still consider the frozen xids as running.
    7931             :      */
    7932           0 :     if (InHotStandby)
    7933             :     {
    7934             :         RelFileNode rnode;
    7935           0 :         TransactionId latestRemovedXid = cutoff_xid;
    7936             : 
    7937           0 :         TransactionIdRetreat(latestRemovedXid);
    7938             : 
    7939           0 :         XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
    7940           0 :         ResolveRecoveryConflictWithSnapshot(latestRemovedXid, rnode);
    7941             :     }
    7942             : 
    7943           0 :     if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    7944             :     {
    7945           0 :         Page        page = BufferGetPage(buffer);
    7946             :         xl_heap_freeze_tuple *tuples;
    7947             : 
    7948           0 :         tuples = (xl_heap_freeze_tuple *) XLogRecGetBlockData(record, 0, NULL);
    7949             : 
    7950             :         /* now execute freeze plan for each frozen tuple */
    7951           0 :         for (ntup = 0; ntup < xlrec->ntuples; ntup++)
    7952             :         {
    7953             :             xl_heap_freeze_tuple *xlrec_tp;
    7954             :             ItemId      lp;
    7955             :             HeapTupleHeader tuple;
    7956             : 
    7957           0 :             xlrec_tp = &tuples[ntup];
    7958           0 :             lp = PageGetItemId(page, xlrec_tp->offset); /* offsets are one-based */
    7959           0 :             tuple = (HeapTupleHeader) PageGetItem(page, lp);
    7960             : 
    7961           0 :             heap_execute_freeze_tuple(tuple, xlrec_tp);
    7962             :         }
    7963             : 
    7964           0 :         PageSetLSN(page, lsn);
    7965           0 :         MarkBufferDirty(buffer);
    7966             :     }
    7967           0 :     if (BufferIsValid(buffer))
    7968           0 :         UnlockReleaseBuffer(buffer);
    7969           0 : }
    7970             : 
    7971             : /*
    7972             :  * Given an "infobits" field from an XLog record, set the correct bits in the
    7973             :  * given infomask and infomask2 for the tuple touched by the record.
    7974             :  *
    7975             :  * (This is the reverse of compute_infobits).
    7976             :  */
    7977             : static void
    7978      126516 : fix_infomask_from_infobits(uint8 infobits, uint16 *infomask, uint16 *infomask2)
    7979             : {
    7980      126516 :     *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
    7981             :                    HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_EXCL_LOCK);
    7982      126516 :     *infomask2 &= ~HEAP_KEYS_UPDATED;
    7983             : 
    7984      126516 :     if (infobits & XLHL_XMAX_IS_MULTI)
    7985           0 :         *infomask |= HEAP_XMAX_IS_MULTI;
    7986      126516 :     if (infobits & XLHL_XMAX_LOCK_ONLY)
    7987       20020 :         *infomask |= HEAP_XMAX_LOCK_ONLY;
    7988      126516 :     if (infobits & XLHL_XMAX_EXCL_LOCK)
    7989       20020 :         *infomask |= HEAP_XMAX_EXCL_LOCK;
    7990             :     /* note HEAP_XMAX_SHR_LOCK isn't considered here */
    7991      126516 :     if (infobits & XLHL_XMAX_KEYSHR_LOCK)
    7992           0 :         *infomask |= HEAP_XMAX_KEYSHR_LOCK;
    7993             : 
    7994      126516 :     if (infobits & XLHL_KEYS_UPDATED)
    7995       46308 :         *infomask2 |= HEAP_KEYS_UPDATED;
    7996      126516 : }
    7997             : 
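
As the comment notes, fix_infomask_from_infobits() is the decode half of a compact encode/decode pair: the WAL record carries a one-byte "infobits" summary, and replay re-expands it into the two infomask words. A self-contained sketch of such a round trip follows; the DEMO_* flag values are invented purely to show the shape of the mapping and are not the real XLHL_* / HEAP_* constants from heapam_xlog.h and htup_details.h.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative flag values only; not the real PostgreSQL constants. */
    #define DEMO_XMAX_LOCK_ONLY   0x0001    /* infomask bit */
    #define DEMO_KEYS_UPDATED     0x0002    /* infomask2 bit */
    #define DEMO_BIT_LOCK_ONLY    0x01      /* compressed infobits */
    #define DEMO_BIT_KEYS_UPDATED 0x02

    static uint8_t
    encode(uint16_t infomask, uint16_t infomask2)
    {
        uint8_t     bits = 0;

        if (infomask & DEMO_XMAX_LOCK_ONLY)
            bits |= DEMO_BIT_LOCK_ONLY;
        if (infomask2 & DEMO_KEYS_UPDATED)
            bits |= DEMO_BIT_KEYS_UPDATED;
        return bits;
    }

    static void
    decode(uint8_t bits, uint16_t *infomask, uint16_t *infomask2)
    {
        /* Clear the bits we own, then set them from the compressed summary. */
        *infomask &= ~DEMO_XMAX_LOCK_ONLY;
        *infomask2 &= ~DEMO_KEYS_UPDATED;
        if (bits & DEMO_BIT_LOCK_ONLY)
            *infomask |= DEMO_XMAX_LOCK_ONLY;
        if (bits & DEMO_BIT_KEYS_UPDATED)
            *infomask2 |= DEMO_KEYS_UPDATED;
    }

    int
    main(void)
    {
        uint16_t    infomask = DEMO_XMAX_LOCK_ONLY;
        uint16_t    infomask2 = 0;
        uint8_t     bits = encode(infomask, infomask2);
        uint16_t    im = 0, im2 = 0;

        decode(bits, &im, &im2);
        printf("round trip ok: %d\n", im == infomask && im2 == infomask2);
        return 0;
    }
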
    7998             : static void
    7999       46316 : heap_xlog_delete(XLogReaderState *record)
    8000             : {
    8001       46316 :     XLogRecPtr  lsn = record->EndRecPtr;
    8002       46316 :     xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
    8003             :     Buffer      buffer;
    8004             :     Page        page;
    8005       46316 :     ItemId      lp = NULL;
    8006             :     HeapTupleHeader htup;
    8007             :     BlockNumber blkno;
    8008             :     RelFileNode target_node;
    8009             :     ItemPointerData target_tid;
    8010             : 
    8011       46316 :     XLogRecGetBlockTag(record, 0, &target_node, NULL, &blkno);
    8012       46316 :     ItemPointerSetBlockNumber(&target_tid, blkno);
    8013       46316 :     ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
    8014             : 
    8015             :     /*
    8016             :      * The visibility map may need to be fixed even if the heap page is
    8017             :      * already up-to-date.
    8018             :      */
    8019       46316 :     if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
    8020             :     {
    8021           0 :         Relation    reln = CreateFakeRelcacheEntry(target_node);
    8022           0 :         Buffer      vmbuffer = InvalidBuffer;
    8023             : 
    8024           0 :         visibilitymap_pin(reln, blkno, &vmbuffer);
    8025           0 :         visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
    8026           0 :         ReleaseBuffer(vmbuffer);
    8027           0 :         FreeFakeRelcacheEntry(reln);
    8028             :     }
    8029             : 
    8030       46316 :     if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    8031             :     {
    8032       46308 :         page = BufferGetPage(buffer);
    8033             : 
    8034       46308 :         if (PageGetMaxOffsetNumber(page) >= xlrec->offnum)
    8035       46308 :             lp = PageGetItemId(page, xlrec->offnum);
    8036             : 
    8037       46308 :         if (PageGetMaxOffsetNumber(page) < xlrec->offnum || !ItemIdIsNormal(lp))
    8038           0 :             elog(PANIC, "invalid lp");
    8039             : 
    8040       46308 :         htup = (HeapTupleHeader) PageGetItem(page, lp);
    8041             : 
    8042       46308 :         htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    8043       46308 :         htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    8044       46308 :         HeapTupleHeaderClearHotUpdated(htup);
    8045       46308 :         fix_infomask_from_infobits(xlrec->infobits_set,
    8046             :                                    &htup->t_infomask, &htup->t_infomask2);
    8047       46308 :         if (!(xlrec->flags & XLH_DELETE_IS_SUPER))
    8048       46308 :             HeapTupleHeaderSetXmax(htup, xlrec->xmax);
    8049             :         else
    8050           0 :             HeapTupleHeaderSetXmin(htup, InvalidTransactionId);
    8051       46308 :         HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
    8052             : 
    8053             :         /* Mark the page as a candidate for pruning */
    8054       46308 :         PageSetPrunable(page, XLogRecGetXid(record));
    8055             : 
    8056       46308 :         if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
    8057           0 :             PageClearAllVisible(page);
    8058             : 
    8059             :         /* Make sure t_ctid is set correctly */
    8060       46308 :         if (xlrec->flags & XLH_DELETE_IS_PARTITION_MOVE)
    8061           0 :             HeapTupleHeaderSetMovedPartitions(htup);
    8062             :         else
    8063       46308 :             htup->t_ctid = target_tid;
    8064       46308 :         PageSetLSN(page, lsn);
    8065       46308 :         MarkBufferDirty(buffer);
    8066             :     }
    8067       46316 :     if (BufferIsValid(buffer))
    8068       46316 :         UnlockReleaseBuffer(buffer);
    8069       46316 : }
    8070             : 
    8071             : static void
    8072      299206 : heap_xlog_insert(XLogReaderState *record)
    8073             : {
    8074      299206 :     XLogRecPtr  lsn = record->EndRecPtr;
    8075      299206 :     xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
    8076             :     Buffer      buffer;
    8077             :     Page        page;
    8078             :     union
    8079             :     {
    8080             :         HeapTupleHeaderData hdr;
    8081             :         char        data[MaxHeapTupleSize];
    8082             :     }           tbuf;
    8083             :     HeapTupleHeader htup;
    8084             :     xl_heap_header xlhdr;
    8085             :     uint32      newlen;
    8086      299206 :     Size        freespace = 0;
    8087             :     RelFileNode target_node;
    8088             :     BlockNumber blkno;
    8089             :     ItemPointerData target_tid;
    8090             :     XLogRedoAction action;
    8091             : 
    8092      299206 :     XLogRecGetBlockTag(record, 0, &target_node, NULL, &blkno);
    8093      299206 :     ItemPointerSetBlockNumber(&target_tid, blkno);
    8094      299206 :     ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
    8095             : 
    8096             :     /*
    8097             :      * The visibility map may need to be fixed even if the heap page is
    8098             :      * already up-to-date.
    8099             :      */
    8100      299206 :     if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
    8101             :     {
    8102         326 :         Relation    reln = CreateFakeRelcacheEntry(target_node);
    8103         326 :         Buffer      vmbuffer = InvalidBuffer;
    8104             : 
    8105         326 :         visibilitymap_pin(reln, blkno, &vmbuffer);
    8106         326 :         visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
    8107         326 :         ReleaseBuffer(vmbuffer);
    8108         326 :         FreeFakeRelcacheEntry(reln);
    8109             :     }
    8110             : 
    8111             :     /*
    8112             :      * If we inserted the first and only tuple on the page, re-initialize the
    8113             :      * page from scratch.
    8114             :      */
    8115      299206 :     if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
    8116             :     {
    8117        2738 :         buffer = XLogInitBufferForRedo(record, 0);
    8118        2738 :         page = BufferGetPage(buffer);
    8119        2738 :         PageInit(page, BufferGetPageSize(buffer), 0);
    8120        2738 :         action = BLK_NEEDS_REDO;
    8121             :     }
    8122             :     else
    8123      296468 :         action = XLogReadBufferForRedo(record, 0, &buffer);
    8124      299206 :     if (action == BLK_NEEDS_REDO)
    8125             :     {
    8126             :         Size        datalen;
    8127             :         char       *data;
    8128             : 
    8129      298492 :         page = BufferGetPage(buffer);
    8130             : 
    8131      298492 :         if (PageGetMaxOffsetNumber(page) + 1 < xlrec->offnum)
    8132           0 :             elog(PANIC, "invalid max offset number");
    8133             : 
    8134      298492 :         data = XLogRecGetBlockData(record, 0, &datalen);
    8135             : 
    8136      298492 :         newlen = datalen - SizeOfHeapHeader;
    8137             :         Assert(datalen > SizeOfHeapHeader && newlen <= MaxHeapTupleSize);
    8138      298492 :         memcpy((char *) &xlhdr, data, SizeOfHeapHeader);
    8139      298492 :         data += SizeOfHeapHeader;
    8140             : 
    8141      298492 :         htup = &tbuf.hdr;
    8142      298492 :         MemSet((char *) htup, 0, SizeofHeapTupleHeader);
    8143             :         /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
    8144      298492 :         memcpy((char *) htup + SizeofHeapTupleHeader,
    8145             :                data,
    8146             :                newlen);
    8147      298492 :         newlen += SizeofHeapTupleHeader;
    8148      298492 :         htup->t_infomask2 = xlhdr.t_infomask2;
    8149      298492 :         htup->t_infomask = xlhdr.t_infomask;
    8150      298492 :         htup->t_hoff = xlhdr.t_hoff;
    8151      298492 :         HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
    8152      298492 :         HeapTupleHeaderSetCmin(htup, FirstCommandId);
    8153      298492 :         htup->t_ctid = target_tid;
    8154             : 
    8155      298492 :         if (PageAddItem(page, (Item) htup, newlen, xlrec->offnum,
    8156             :                         true, true) == InvalidOffsetNumber)
    8157           0 :             elog(PANIC, "failed to add tuple");
    8158             : 
    8159      298492 :         freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
    8160             : 
    8161      298492 :         PageSetLSN(page, lsn);
    8162             : 
    8163      298492 :         if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
    8164           0 :             PageClearAllVisible(page);
    8165             : 
    8166      298492 :         MarkBufferDirty(buffer);
    8167             :     }
    8168      299206 :     if (BufferIsValid(buffer))
    8169      299206 :         UnlockReleaseBuffer(buffer);
    8170             : 
    8171             :     /*
    8172             :      * If the page is running low on free space, update the FSM as well.
    8173             :      * Arbitrarily, our definition of "low" is less than 20%. We can't do much
    8174             :      * better than that without knowing the fill-factor for the table.
    8175             :      *
    8176             :      * XXX: Don't do this if the page was restored from full page image. We
    8177             :      * don't bother to update the FSM in that case, it doesn't need to be
    8178             :      * totally accurate anyway.
    8179             :      */
    8180      299206 :     if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
    8181       56460 :         XLogRecordPageWithFreeSpace(target_node, blkno, freespace);
    8182      299206 : }
    8183             : 
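
The "less than 20%" heuristic in the comment above is literally freespace < BLCKSZ / 5; with the default 8 kB block size that means the FSM is only told about the page when fewer than 1638 bytes remain. A one-check sketch, assuming the default block size and an invented freespace value:

    #include <stdio.h>

    #define DEMO_BLCKSZ 8192            /* default PostgreSQL block size */

    int
    main(void)
    {
        unsigned    freespace = 1200;   /* hypothetical bytes left on the page */

        if (freespace < DEMO_BLCKSZ / 5)
            printf("low on space (< %d bytes): record in FSM\n", DEMO_BLCKSZ / 5);
        else
            printf("enough space: skip the FSM update\n");
        return 0;
    }
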
    8184             : /*
    8185             :  * Handles MULTI_INSERT record type.
    8186             :  */
    8187             : static void
    8188          16 : heap_xlog_multi_insert(XLogReaderState *record)
    8189             : {
    8190          16 :     XLogRecPtr  lsn = record->EndRecPtr;
    8191             :     xl_heap_multi_insert *xlrec;
    8192             :     RelFileNode rnode;
    8193             :     BlockNumber blkno;
    8194             :     Buffer      buffer;
    8195             :     Page        page;
    8196             :     union
    8197             :     {
    8198             :         HeapTupleHeaderData hdr;
    8199             :         char        data[MaxHeapTupleSize];
    8200             :     }           tbuf;
    8201             :     HeapTupleHeader htup;
    8202             :     uint32      newlen;
    8203          16 :     Size        freespace = 0;
    8204             :     int         i;
    8205          16 :     bool        isinit = (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE) != 0;
    8206             :     XLogRedoAction action;
    8207             : 
    8208             :     /*
    8209             :      * Insertion doesn't overwrite MVCC data, so no conflict processing is
    8210             :      * required.
    8211             :      */
    8212          16 :     xlrec = (xl_heap_multi_insert *) XLogRecGetData(record);
    8213             : 
    8214          16 :     XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
    8215             : 
    8216             :     /*
    8217             :      * The visibility map may need to be fixed even if the heap page is
    8218             :      * already up-to-date.
    8219             :      */
    8220          16 :     if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
    8221             :     {
    8222           0 :         Relation    reln = CreateFakeRelcacheEntry(rnode);
    8223           0 :         Buffer      vmbuffer = InvalidBuffer;
    8224             : 
    8225           0 :         visibilitymap_pin(reln, blkno, &vmbuffer);
    8226           0 :         visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
    8227           0 :         ReleaseBuffer(vmbuffer);
    8228           0 :         FreeFakeRelcacheEntry(reln);
    8229             :     }
    8230             : 
    8231          16 :     if (isinit)
    8232             :     {
    8233           8 :         buffer = XLogInitBufferForRedo(record, 0);
    8234           8 :         page = BufferGetPage(buffer);
    8235           8 :         PageInit(page, BufferGetPageSize(buffer), 0);
    8236           8 :         action = BLK_NEEDS_REDO;
    8237             :     }
    8238             :     else
    8239           8 :         action = XLogReadBufferForRedo(record, 0, &buffer);
    8240          16 :     if (action == BLK_NEEDS_REDO)
    8241             :     {
    8242             :         char       *tupdata;
    8243             :         char       *endptr;
    8244             :         Size        len;
    8245             : 
    8246             :         /* Tuples are stored as block data */
    8247          16 :         tupdata = XLogRecGetBlockData(record, 0, &len);
    8248          16 :         endptr = tupdata + len;
    8249             : 
    8250          16 :         page = (Page) BufferGetPage(buffer);
    8251             : 
    8252          64 :         for (i = 0; i < xlrec->ntuples; i++)
    8253             :         {
    8254             :             OffsetNumber offnum;
    8255             :             xl_multi_insert_tuple *xlhdr;
    8256             : 
    8257             :             /*
    8258             :              * If we're reinitializing the page, the tuples are stored in
    8259             :              * order from FirstOffsetNumber. Otherwise there's an array of
    8260             :              * offsets in the WAL record, and the tuples come after that.
    8261             :              */
    8262          48 :             if (isinit)
    8263          24 :                 offnum = FirstOffsetNumber + i;
    8264             :             else
    8265          24 :                 offnum = xlrec->offsets[i];
    8266          48 :             if (PageGetMaxOffsetNumber(page) + 1 < offnum)
    8267           0 :                 elog(PANIC, "invalid max offset number");
    8268             : 
    8269          48 :             xlhdr = (xl_multi_insert_tuple *) SHORTALIGN(tupdata);
    8270          48 :             tupdata = ((char *) xlhdr) + SizeOfMultiInsertTuple;
    8271             : 
    8272          48 :             newlen = xlhdr->datalen;
    8273             :             Assert(newlen <= MaxHeapTupleSize);
    8274          48 :             htup = &tbuf.hdr;
    8275          48 :             MemSet((char *) htup, 0, SizeofHeapTupleHeader);
    8276             :             /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
    8277          48 :             memcpy((char *) htup + SizeofHeapTupleHeader,
    8278             :                    (char *) tupdata,
    8279             :                    newlen);
    8280          48 :             tupdata += newlen;
    8281             : 
    8282          48 :             newlen += SizeofHeapTupleHeader;
    8283          48 :             htup->t_infomask2 = xlhdr->t_infomask2;
    8284          48 :             htup->t_infomask = xlhdr->t_infomask;
    8285          48 :             htup->t_hoff = xlhdr->t_hoff;
    8286          48 :             HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
    8287          48 :             HeapTupleHeaderSetCmin(htup, FirstCommandId);
    8288          48 :             ItemPointerSetBlockNumber(&htup->t_ctid, blkno);
    8289          48 :             ItemPointerSetOffsetNumber(&htup->t_ctid, offnum);
    8290             : 
    8291          48 :             offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
    8292          48 :             if (offnum == InvalidOffsetNumber)
    8293           0 :                 elog(PANIC, "failed to add tuple");
    8294             :         }
    8295          16 :         if (tupdata != endptr)
    8296           0 :             elog(PANIC, "total tuple length mismatch");
    8297             : 
    8298          16 :         freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
    8299             : 
    8300          16 :         PageSetLSN(page, lsn);
    8301             : 
    8302          16 :         if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
    8303           0 :             PageClearAllVisible(page);
    8304             : 
    8305          16 :         MarkBufferDirty(buffer);
    8306             :     }
    8307          16 :     if (BufferIsValid(buffer))
    8308          16 :         UnlockReleaseBuffer(buffer);
    8309             : 
    8310             :     /*
    8311             :      * If the page is running low on free space, update the FSM as well.
    8312             :      * Arbitrarily, our definition of "low" is less than 20%. We can't do much
    8313             :      * better than that without knowing the fill-factor for the table.
    8314             :      *
    8315             :      * XXX: Don't do this if the page was restored from full page image. We
    8316             :      * don't bother to update the FSM in that case, it doesn't need to be
    8317             :      * totally accurate anyway.
    8318             :      */
    8319          16 :     if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
    8320           0 :         XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
    8321          16 : }
    8322             : 
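
The redo loop above walks a packed block-data buffer in which each tuple is a short per-tuple header immediately followed by its data bytes. A standalone sketch of that walking pattern follows; demo_tuple_header is a made-up stand-in for xl_multi_insert_tuple (the real header also carries infomask fields and is SHORTALIGN'd), and memcpy is used here to sidestep the alignment question entirely.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Made-up stand-in for xl_multi_insert_tuple: just a length prefix. */
    typedef struct
    {
        uint16_t    datalen;
    } demo_tuple_header;

    int
    main(void)
    {
        char        buf[64];
        char       *p = buf;
        const char *payloads[] = {"abc", "defgh"};

        /* Pack two "tuples" back to back, the way the WAL block data is packed. */
        for (int i = 0; i < 2; i++)
        {
            demo_tuple_header hdr = {(uint16_t) strlen(payloads[i])};

            memcpy(p, &hdr, sizeof(hdr));
            p += sizeof(hdr);
            memcpy(p, payloads[i], hdr.datalen);
            p += hdr.datalen;
        }

        /* Walk the packed buffer again, header by header, like the redo loop. */
        char       *q = buf;

        for (int i = 0; i < 2; i++)
        {
            demo_tuple_header hdr;

            memcpy(&hdr, q, sizeof(hdr));
            q += sizeof(hdr);
            printf("tuple %d: %.*s (%d bytes)\n", i, (int) hdr.datalen, q, (int) hdr.datalen);
            q += hdr.datalen;
        }
        return 0;
    }
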
    8323             : /*
    8324             :  * Handles UPDATE and HOT_UPDATE
    8325             :  */
    8326             : static void
    8327       60190 : heap_xlog_update(XLogReaderState *record, bool hot_update)
    8328             : {
    8329       60190 :     XLogRecPtr  lsn = record->EndRecPtr;
    8330       60190 :     xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
    8331             :     RelFileNode rnode;
    8332             :     BlockNumber oldblk;
    8333             :     BlockNumber newblk;
    8334             :     ItemPointerData newtid;
    8335             :     Buffer      obuffer,
    8336             :                 nbuffer;
    8337             :     Page        page;
    8338             :     OffsetNumber offnum;
    8339       60190 :     ItemId      lp = NULL;
    8340             :     HeapTupleData oldtup;
    8341             :     HeapTupleHeader htup;
    8342       60190 :     uint16      prefixlen = 0,
    8343       60190 :                 suffixlen = 0;
    8344             :     char       *newp;
    8345             :     union
    8346             :     {
    8347             :         HeapTupleHeaderData hdr;
    8348             :         char        data[MaxHeapTupleSize];
    8349             :     }           tbuf;
    8350             :     xl_heap_header xlhdr;
    8351             :     uint32      newlen;
    8352       60190 :     Size        freespace = 0;
    8353             :     XLogRedoAction oldaction;
    8354             :     XLogRedoAction newaction;
    8355             : 
    8356             :     /* initialize to keep the compiler quiet */
    8357       60190 :     oldtup.t_data = NULL;
    8358       60190 :     oldtup.t_len = 0;
    8359             : 
    8360       60190 :     XLogRecGetBlockTag(record, 0, &rnode, NULL, &newblk);
    8361       60190 :     if (XLogRecGetBlockTag(record, 1, NULL, NULL, &oldblk))
    8362             :     {
    8363             :         /* HOT updates are never done across pages */
    8364             :         Assert(!hot_update);
    8365             :     }
    8366             :     else
    8367       40170 :         oldblk = newblk;
    8368             : 
    8369       60190 :     ItemPointerSet(&newtid, newblk, xlrec->new_offnum);
    8370             : 
    8371             :     /*
    8372             :      * The visibility map may need to be fixed even if the heap page is
    8373             :      * already up-to-date.
    8374             :      */
    8375       60190 :     if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
    8376             :     {
    8377           0 :         Relation    reln = CreateFakeRelcacheEntry(rnode);
    8378           0 :         Buffer      vmbuffer = InvalidBuffer;
    8379             : 
    8380           0 :         visibilitymap_pin(reln, oldblk, &vmbuffer);
    8381           0 :         visibilitymap_clear(reln, oldblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
    8382           0 :         ReleaseBuffer(vmbuffer);
    8383           0 :         FreeFakeRelcacheEntry(reln);
    8384             :     }
    8385             : 
    8386             :     /*
    8387             :      * In normal operation, it is important to lock the two pages in
    8388             :      * page-number order, to avoid possible deadlocks against other update
    8389             :      * operations going the other way.  However, during WAL replay there can
    8390             :      * be no other update happening, so we don't need to worry about that. But
    8391             :      * we *do* need to worry that we don't expose an inconsistent state to Hot
    8392             :      * Standby queries --- so the original page can't be unlocked before we've
    8393             :      * added the new tuple to the new page.
    8394             :      */
    8395             : 
    8396             :     /* Deal with old tuple version */
    8397       60190 :     oldaction = XLogReadBufferForRedo(record, (oldblk == newblk) ? 0 : 1,
    8398             :                                       &obuffer);
    8399       60190 :     if (oldaction == BLK_NEEDS_REDO)
    8400             :     {
    8401       60188 :         page = BufferGetPage(obuffer);
    8402       60188 :         offnum = xlrec->old_offnum;
    8403       60188 :         if (PageGetMaxOffsetNumber(page) >= offnum)
    8404       60188 :             lp = PageGetItemId(page, offnum);
    8405             : 
    8406       60188 :         if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
    8407           0 :             elog(PANIC, "invalid lp");
    8408             : 
    8409       60188 :         htup = (HeapTupleHeader) PageGetItem(page, lp);
    8410             : 
    8411       60188 :         oldtup.t_data = htup;
    8412       60188 :         oldtup.t_len = ItemIdGetLength(lp);
    8413             : 
    8414       60188 :         htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    8415       60188 :         htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    8416       60188 :         if (hot_update)
    8417       40136 :             HeapTupleHeaderSetHotUpdated(htup);
    8418             :         else
    8419       20052 :             HeapTupleHeaderClearHotUpdated(htup);
    8420       60188 :         fix_infomask_from_infobits(xlrec->old_infobits_set, &htup->t_infomask,
    8421             :                                    &htup->t_infomask2);
    8422       60188 :         HeapTupleHeaderSetXmax(htup, xlrec->old_xmax);
    8423       60188 :         HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
    8424             :         /* Set forward chain link in t_ctid */
    8425       60188 :         htup->t_ctid = newtid;
    8426             : 
    8427             :         /* Mark the page as a candidate for pruning */
    8428       60188 :         PageSetPrunable(page, XLogRecGetXid(record));
    8429             : 
    8430       60188 :         if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
    8431           0 :             PageClearAllVisible(page);
    8432             : 
    8433       60188 :         PageSetLSN(page, lsn);
    8434       60188 :         MarkBufferDirty(obuffer);
    8435             :     }
    8436             : 
    8437             :     /*
    8438             :      * Read the page the new tuple goes into, if different from old.
    8439             :      */
    8440       60190 :     if (oldblk == newblk)
    8441             :     {
    8442       40170 :         nbuffer = obuffer;
    8443       40170 :         newaction = oldaction;
    8444             :     }
    8445       20020 :     else if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
    8446             :     {
    8447          88 :         nbuffer = XLogInitBufferForRedo(record, 0);
    8448          88 :         page = (Page) BufferGetPage(nbuffer);
    8449          88 :         PageInit(page, BufferGetPageSize(nbuffer), 0);
    8450          88 :         newaction = BLK_NEEDS_REDO;
    8451             :     }
    8452             :     else
    8453       19932 :         newaction = XLogReadBufferForRedo(record, 0, &nbuffer);
    8454             : 
    8455             :     /*
    8456             :      * The visibility map may need to be fixed even if the heap page is
    8457             :      * already up-to-date.
    8458             :      */
    8459       60190 :     if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
    8460             :     {
    8461           6 :         Relation    reln = CreateFakeRelcacheEntry(rnode);
    8462           6 :         Buffer      vmbuffer = InvalidBuffer;
    8463             : 
    8464           6 :         visibilitymap_pin(reln, newblk, &vmbuffer);
    8465           6 :         visibilitymap_clear(reln, newblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
    8466           6 :         ReleaseBuffer(vmbuffer);
    8467           6 :         FreeFakeRelcacheEntry(reln);
    8468             :     }
    8469             : 
    8470             :     /* Deal with new tuple */
    8471       60190 :     if (newaction == BLK_NEEDS_REDO)
    8472             :     {
    8473             :         char       *recdata;
    8474             :         char       *recdata_end;
    8475             :         Size        datalen;
    8476             :         Size        tuplen;
    8477             : 
    8478       60182 :         recdata = XLogRecGetBlockData(record, 0, &datalen);
    8479       60182 :         recdata_end = recdata + datalen;
    8480             : 
    8481       60182 :         page = BufferGetPage(nbuffer);
    8482             : 
    8483       60182 :         offnum = xlrec->new_offnum;
    8484       60182 :         if (PageGetMaxOffsetNumber(page) + 1 < offnum)
    8485           0 :             elog(PANIC, "invalid max offset number");
    8486             : 
    8487       60182 :         if (xlrec->flags & XLH_UPDATE_PREFIX_FROM_OLD)
    8488             :         {
    8489             :             Assert(newblk == oldblk);
    8490         168 :             memcpy(&prefixlen, recdata, sizeof(uint16));
    8491         168 :             recdata += sizeof(uint16);
    8492             :         }
    8493       60182 :         if (xlrec->flags & XLH_UPDATE_SUFFIX_FROM_OLD)
    8494             :         {
    8495             :             Assert(newblk == oldblk);
    8496       39956 :             memcpy(&suffixlen, recdata, sizeof(uint16));
    8497       39956 :             recdata += sizeof(uint16);
    8498             :         }
    8499             : 
    8500       60182 :         memcpy((char *) &xlhdr, recdata, SizeOfHeapHeader);
    8501       60182 :         recdata += SizeOfHeapHeader;
    8502             : 
    8503       60182 :         tuplen = recdata_end - recdata;
    8504             :         Assert(tuplen <= MaxHeapTupleSize);
    8505             : 
    8506       60182 :         htup = &tbuf.hdr;
    8507       60182 :         MemSet((char *) htup, 0, SizeofHeapTupleHeader);
    8508             : 
    8509             :         /*
    8510             :          * Reconstruct the new tuple using the prefix and/or suffix from the
    8511             :          * old tuple, and the data stored in the WAL record.
    8512             :          */
    8513       60182 :         newp = (char *) htup + SizeofHeapTupleHeader;
    8514       60182 :         if (prefixlen > 0)
    8515             :         {
    8516             :             int         len;
    8517             : 
    8518             :             /* copy bitmap [+ padding] [+ oid] from WAL record */
    8519         168 :             len = xlhdr.t_hoff - SizeofHeapTupleHeader;
    8520         168 :             memcpy(newp, recdata, len);
    8521         168 :             recdata += len;
    8522         168 :             newp += len;
    8523             : 
    8524             :             /* copy prefix from old tuple */
    8525         168 :             memcpy(newp, (char *) oldtup.t_data + oldtup.t_data->t_hoff, prefixlen);
    8526         168 :             newp += prefixlen;
    8527             : 
    8528             :             /* copy new tuple data from WAL record */
    8529         168 :             len = tuplen - (xlhdr.t_hoff - SizeofHeapTupleHeader);
    8530         168 :             memcpy(newp, recdata, len);
    8531         168 :             recdata += len;
    8532         168 :             newp += len;
    8533             :         }
    8534             :         else
    8535             :         {
    8536             :             /*
    8537             :              * copy bitmap [+ padding] [+ oid] + data from record, all in one
    8538             :              * go
    8539             :              */
    8540       60014 :             memcpy(newp, recdata, tuplen);
    8541       60014 :             recdata += tuplen;
    8542       60014 :             newp += tuplen;
    8543             :         }
    8544             :         Assert(recdata == recdata_end);
    8545             : 
    8546             :         /* copy suffix from old tuple */
    8547       60182 :         if (suffixlen > 0)
    8548       39956 :             memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);
    8549             : 
    8550       60182 :         newlen = SizeofHeapTupleHeader + tuplen + prefixlen + suffixlen;
    8551       60182 :         htup->t_infomask2 = xlhdr.t_infomask2;
    8552       60182 :         htup->t_infomask = xlhdr.t_infomask;
    8553       60182 :         htup->t_hoff = xlhdr.t_hoff;
    8554             : 
    8555       60182 :         HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
    8556       60182 :         HeapTupleHeaderSetCmin(htup, FirstCommandId);
    8557       60182 :         HeapTupleHeaderSetXmax(htup, xlrec->new_xmax);
    8558             :         /* Make sure there is no forward chain link in t_ctid */
    8559       60182 :         htup->t_ctid = newtid;
    8560             : 
    8561       60182 :         offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
    8562       60182 :         if (offnum == InvalidOffsetNumber)
    8563           0 :             elog(PANIC, "failed to add tuple");
    8564             : 
    8565       60182 :         if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
    8566           0 :             PageClearAllVisible(page);
    8567             : 
    8568       60182 :         freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
    8569             : 
    8570       60182 :         PageSetLSN(page, lsn);
    8571       60182 :         MarkBufferDirty(nbuffer);
    8572             :     }
    8573             : 
    8574       60190 :     if (BufferIsValid(nbuffer) && nbuffer != obuffer)
    8575       20020 :         UnlockReleaseBuffer(nbuffer);
    8576       60190 :     if (BufferIsValid(obuffer))
    8577       60190 :         UnlockReleaseBuffer(obuffer);
    8578             : 
    8579             :     /*
    8580             :      * If the new page is running low on free space, update the FSM as well.
    8581             :      * Arbitrarily, our definition of "low" is less than 20%. We can't do much
    8582             :      * better than that without knowing the fill-factor for the table.
    8583             :      *
    8584             :      * However, don't update the FSM on HOT updates, because after crash
    8585             :      * recovery, either the old or the new tuple will certainly be dead and
    8586             :      * prunable. After pruning, the page will have roughly as much free space
    8587             :      * as it did before the update, assuming the new tuple is about the same
    8588             :      * size as the old one.
    8589             :      *
    8590             :      * XXX: Don't do this if the page was restored from full page image. We
    8591             :      * don't bother to update the FSM in that case, it doesn't need to be
    8592             :      * totally accurate anyway.
    8593             :      */
    8594       60190 :     if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
    8595        3960 :         XLogRecordPageWithFreeSpace(rnode, newblk, freespace);
    8596       60190 : }
    8597             : 
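
The prefix/suffix handling above is a simple delta encoding: when the new tuple shares leading and/or trailing bytes with the old one on the same page, the WAL record stores only the changed middle, and redo stitches the new tuple back together as old-prefix + WAL payload + old-suffix. A standalone sketch over plain byte strings; the lengths and contents are invented for illustration.

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        const char  oldtup[] = "AAAA-old-middle-ZZZZ";
        const char  waldata[] = "NEW";      /* only the changed middle is logged */
        size_t      prefixlen = 5;          /* "AAAA-" matches the old tuple */
        size_t      suffixlen = 5;          /* "-ZZZZ" matches the old tuple */
        char        newtup[64];
        char       *p = newtup;

        /* prefix from the old tuple */
        memcpy(p, oldtup, prefixlen);
        p += prefixlen;

        /* changed bytes from the WAL record */
        memcpy(p, waldata, strlen(waldata));
        p += strlen(waldata);

        /* suffix from the end of the old tuple */
        memcpy(p, oldtup + strlen(oldtup) - suffixlen, suffixlen);
        p += suffixlen;
        *p = '\0';

        printf("reconstructed: %s\n", newtup);  /* AAAA-NEW-ZZZZ */
        return 0;
    }
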
    8598             : static void
    8599           0 : heap_xlog_confirm(XLogReaderState *record)
    8600             : {
    8601           0 :     XLogRecPtr  lsn = record->EndRecPtr;
    8602           0 :     xl_heap_confirm *xlrec = (xl_heap_confirm *) XLogRecGetData(record);
    8603             :     Buffer      buffer;
    8604             :     Page        page;
    8605             :     OffsetNumber offnum;
    8606           0 :     ItemId      lp = NULL;
    8607             :     HeapTupleHeader htup;
    8608             : 
    8609           0 :     if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    8610             :     {
    8611           0 :         page = BufferGetPage(buffer);
    8612             : 
    8613           0 :         offnum = xlrec->offnum;
    8614           0 :         if (PageGetMaxOffsetNumber(page) >= offnum)
    8615           0 :             lp = PageGetItemId(page, offnum);
    8616             : 
    8617           0 :         if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
    8618           0 :             elog(PANIC, "invalid lp");
    8619             : 
    8620           0 :         htup = (HeapTupleHeader) PageGetItem(page, lp);
    8621             : 
    8622             :         /*
    8623             :          * Confirm tuple as actually inserted
    8624             :          */
    8625           0 :         ItemPointerSet(&htup->t_ctid, BufferGetBlockNumber(buffer), offnum);
    8626             : 
    8627           0 :         PageSetLSN(page, lsn);
    8628           0 :         MarkBufferDirty(buffer);
    8629             :     }
    8630           0 :     if (BufferIsValid(buffer))
    8631           0 :         UnlockReleaseBuffer(buffer);
    8632           0 : }
    8633             : 
    8634             : static void
    8635       20020 : heap_xlog_lock(XLogReaderState *record)
    8636             : {
    8637       20020 :     XLogRecPtr  lsn = record->EndRecPtr;
    8638       20020 :     xl_heap_lock *xlrec = (xl_heap_lock *) XLogRecGetData(record);
    8639             :     Buffer      buffer;
    8640             :     Page        page;
    8641             :     OffsetNumber offnum;
    8642       20020 :     ItemId      lp = NULL;
    8643             :     HeapTupleHeader htup;
    8644             : 
    8645             :     /*
    8646             :      * The visibility map may need to be fixed even if the heap page is
    8647             :      * already up-to-date.
    8648             :      */
    8649       20020 :     if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
    8650             :     {
    8651             :         RelFileNode rnode;
    8652           0 :         Buffer      vmbuffer = InvalidBuffer;
    8653             :         BlockNumber block;
    8654             :         Relation    reln;
    8655             : 
    8656           0 :         XLogRecGetBlockTag(record, 0, &rnode, NULL, &block);
    8657           0 :         reln = CreateFakeRelcacheEntry(rnode);
    8658             : 
    8659           0 :         visibilitymap_pin(reln, block, &vmbuffer);
    8660           0 :         visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
    8661             : 
    8662           0 :         ReleaseBuffer(vmbuffer);
    8663           0 :         FreeFakeRelcacheEntry(reln);
    8664             :     }
    8665             : 
    8666       20020 :     if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    8667             :     {
    8668       20020 :         page = (Page) BufferGetPage(buffer);
    8669             : 
    8670       20020 :         offnum = xlrec->offnum;
    8671       20020 :         if (PageGetMaxOffsetNumber(page) >= offnum)
    8672       20020 :             lp = PageGetItemId(page, offnum);
    8673             : 
    8674       20020 :         if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
    8675           0 :             elog(PANIC, "invalid lp");
    8676             : 
    8677       20020 :         htup = (HeapTupleHeader) PageGetItem(page, lp);
    8678             : 
    8679       20020 :         htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    8680       20020 :         htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    8681       20020 :         fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
    8682             :                                    &htup->t_infomask2);
    8683             : 
    8684             :         /*
    8685             :          * Clear relevant update flags, but only if the modified infomask says
    8686             :          * there's no update.
    8687             :          */
    8688       20020 :         if (HEAP_XMAX_IS_LOCKED_ONLY(htup->t_infomask))
    8689             :         {
    8690       20020 :             HeapTupleHeaderClearHotUpdated(htup);
    8691             :             /* Make sure there is no forward chain link in t_ctid */
    8692       20020 :             ItemPointerSet(&htup->t_ctid,
    8693             :                            BufferGetBlockNumber(buffer),
    8694             :                            offnum);
    8695             :         }
    8696       20020 :         HeapTupleHeaderSetXmax(htup, xlrec->locking_xid);
    8697       20020 :         HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
    8698       20020 :         PageSetLSN(page, lsn);
    8699       20020 :         MarkBufferDirty(buffer);
    8700             :     }
    8701       20020 :     if (BufferIsValid(buffer))
    8702       20020 :         UnlockReleaseBuffer(buffer);
    8703       20020 : }
    8704             : 
    8705             : static void
    8706           0 : heap_xlog_lock_updated(XLogReaderState *record)
    8707             : {
    8708           0 :     XLogRecPtr  lsn = record->EndRecPtr;
    8709             :     xl_heap_lock_updated *xlrec;
    8710             :     Buffer      buffer;
    8711             :     Page        page;
    8712             :     OffsetNumber offnum;
    8713           0 :     ItemId      lp = NULL;
    8714             :     HeapTupleHeader htup;
    8715             : 
    8716           0 :     xlrec = (xl_heap_lock_updated *) XLogRecGetData(record);
    8717             : 
    8718             :     /*
    8719             :      * The visibility map may need to be fixed even if the heap page is
    8720             :      * already up-to-date.
    8721             :      */
    8722           0 :     if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
    8723             :     {
    8724             :         RelFileNode rnode;
    8725           0 :         Buffer      vmbuffer = InvalidBuffer;
    8726             :         BlockNumber block;
    8727             :         Relation    reln;
    8728             : 
    8729           0 :         XLogRecGetBlockTag(record, 0, &rnode, NULL, &block);
    8730           0 :         reln = CreateFakeRelcacheEntry(rnode);
    8731             : 
    8732           0 :         visibilitymap_pin(reln, block, &vmbuffer);
    8733           0 :         visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
    8734             : 
    8735           0 :         ReleaseBuffer(vmbuffer);
    8736           0 :         FreeFakeRelcacheEntry(reln);
    8737             :     }
    8738             : 
    8739           0 :     if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    8740             :     {
    8741           0 :         page = BufferGetPage(buffer);
    8742             : 
    8743           0 :         offnum = xlrec->offnum;
    8744           0 :         if (PageGetMaxOffsetNumber(page) >= offnum)
    8745           0 :             lp = PageGetItemId(page, offnum);
    8746             : 
    8747           0 :         if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
    8748           0 :             elog(PANIC, "invalid lp");
    8749             : 
    8750           0 :         htup = (HeapTupleHeader) PageGetItem(page, lp);
    8751             : 
    8752           0 :         htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
    8753           0 :         htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
    8754           0 :         fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
    8755             :                                    &htup->t_infomask2);
    8756           0 :         HeapTupleHeaderSetXmax(htup, xlrec->xmax);
    8757             : 
    8758           0 :         PageSetLSN(page, lsn);
    8759           0 :         MarkBufferDirty(buffer);
    8760             :     }
    8761           0 :     if (BufferIsValid(buffer))
    8762           0 :         UnlockReleaseBuffer(buffer);
    8763           0 : }
    8764             : 
    8765             : static void
    8766         200 : heap_xlog_inplace(XLogReaderState *record)
    8767             : {
    8768         200 :     XLogRecPtr  lsn = record->EndRecPtr;
    8769         200 :     xl_heap_inplace *xlrec = (xl_heap_inplace *) XLogRecGetData(record);
    8770             :     Buffer      buffer;
    8771             :     Page        page;
    8772             :     OffsetNumber offnum;
    8773         200 :     ItemId      lp = NULL;
    8774             :     HeapTupleHeader htup;
    8775             :     uint32      oldlen;
    8776             :     Size        newlen;
    8777             : 
    8778         200 :     if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    8779             :     {
    8780         188 :         char       *newtup = XLogRecGetBlockData(record, 0, &newlen);
    8781             : 
    8782         188 :         page = BufferGetPage(buffer);
    8783             : 
    8784         188 :         offnum = xlrec->offnum;
    8785         188 :         if (PageGetMaxOffsetNumber(page) >= offnum)
    8786         188 :             lp = PageGetItemId(page, offnum);
    8787             : 
    8788         188 :         if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
    8789           0 :             elog(PANIC, "invalid lp");
    8790             : 
    8791         188 :         htup = (HeapTupleHeader) PageGetItem(page, lp);
    8792             : 
    8793         188 :         oldlen = ItemIdGetLength(lp) - htup->t_hoff;
    8794         188 :         if (oldlen != newlen)
    8795           0 :             elog(PANIC, "wrong tuple length");
    8796             : 
    8797         188 :         memcpy((char *) htup + htup->t_hoff, newtup, newlen);
    8798             : 
    8799         188 :         PageSetLSN(page, lsn);
    8800         188 :         MarkBufferDirty(buffer);
    8801             :     }
    8802         200 :     if (BufferIsValid(buffer))
    8803         200 :         UnlockReleaseBuffer(buffer);
    8804         200 : }
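
Each of the record-specific handlers above (heap_xlog_confirm, heap_xlog_lock, heap_xlog_lock_updated, heap_xlog_inplace) follows the same redo idiom: apply the change only when XLogReadBufferForRedo() reports BLK_NEEDS_REDO, stamp the page with the record's LSN, and release the buffer unconditionally. A minimal sketch of that idiom, assuming only the XLogReadBufferForRedo() contract used in this file (example_redo_handler is a hypothetical name, not part of heapam.c):

    static void
    example_redo_handler(XLogReaderState *record)
    {
        XLogRecPtr  lsn = record->EndRecPtr;
        Buffer      buffer;

        /* BLK_NEEDS_REDO is returned only if the page's LSN predates this record */
        if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
        {
            Page        page = BufferGetPage(buffer);

            /* ... apply this record's change to the page here ... */

            /* stamp the page so the record is not applied twice on a later replay */
            PageSetLSN(page, lsn);
            MarkBufferDirty(buffer);
        }
        if (BufferIsValid(buffer))
            UnlockReleaseBuffer(buffer);
    }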
    8805             : 
    8806             : void
    8807      425932 : heap_redo(XLogReaderState *record)
    8808             : {
    8809      425932 :     uint8       info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
    8810             : 
    8811             :     /*
    8812             :      * These operations don't overwrite MVCC data so no conflict processing is
    8813             :      * required. The ones in heap2 rmgr do.
    8814             :      */
    8815             : 
    8816      425932 :     switch (info & XLOG_HEAP_OPMASK)
    8817             :     {
    8818      299206 :         case XLOG_HEAP_INSERT:
    8819      299206 :             heap_xlog_insert(record);
    8820      299206 :             break;
    8821       46316 :         case XLOG_HEAP_DELETE:
    8822       46316 :             heap_xlog_delete(record);
    8823       46316 :             break;
    8824       20054 :         case XLOG_HEAP_UPDATE:
    8825       20054 :             heap_xlog_update(record, false);
    8826       20054 :             break;
    8827           0 :         case XLOG_HEAP_TRUNCATE:
    8828             : 
    8829             :             /*
    8830             :              * TRUNCATE is a no-op because the actions are already logged as
    8831             :              * SMGR WAL records.  The TRUNCATE WAL record exists only for
    8832             :              * logical decoding.
    8833             :              */
    8834           0 :             break;
    8835       40136 :         case XLOG_HEAP_HOT_UPDATE:
    8836       40136 :             heap_xlog_update(record, true);
    8837       40136 :             break;
    8838           0 :         case XLOG_HEAP_CONFIRM:
    8839           0 :             heap_xlog_confirm(record);
    8840           0 :             break;
    8841       20020 :         case XLOG_HEAP_LOCK:
    8842       20020 :             heap_xlog_lock(record);
    8843       20020 :             break;
    8844         200 :         case XLOG_HEAP_INPLACE:
    8845         200 :             heap_xlog_inplace(record);
    8846         200 :             break;
    8847           0 :         default:
    8848           0 :             elog(PANIC, "heap_redo: unknown op code %u", info);
    8849             :     }
    8850      425932 : }
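
heap_redo() strips XLR_INFO_MASK (the bits reserved for the WAL machinery itself) and dispatches on info & XLOG_HEAP_OPMASK, so the opcode and the XLOG_HEAP_INIT_PAGE flag travel together in the record's info byte. A sketch of the insert-side counterpart, assuming the xl_heap_insert layout from access/heapam_xlog.h (example_log_heap_insert and its arguments are hypothetical, not heapam.c code):

    static XLogRecPtr
    example_log_heap_insert(Buffer buffer, OffsetNumber offnum, bool page_is_new)
    {
        xl_heap_insert xlrec;
        uint8       info = XLOG_HEAP_INSERT;

        xlrec.offnum = offnum;      /* where the tuple was placed */
        xlrec.flags = 0;

        if (page_is_new)
            info |= XLOG_HEAP_INIT_PAGE;    /* flag bit outside XLOG_HEAP_OPMASK */

        XLogBeginInsert();
        XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
        XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
        /* ... XLogRegisterBufData(0, ...) would attach the tuple itself ... */

        return XLogInsert(RM_HEAP_ID, info);
    }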
    8851             : 
    8852             : void
    8853        2356 : heap2_redo(XLogReaderState *record)
    8854             : {
    8855        2356 :     uint8       info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
    8856             : 
    8857        2356 :     switch (info & XLOG_HEAP_OPMASK)
    8858             :     {
    8859        1858 :         case XLOG_HEAP2_CLEAN:
    8860        1858 :             heap_xlog_clean(record);
    8861        1858 :             break;
    8862           0 :         case XLOG_HEAP2_FREEZE_PAGE:
    8863           0 :             heap_xlog_freeze_page(record);
    8864           0 :             break;
    8865           0 :         case XLOG_HEAP2_CLEANUP_INFO:
    8866           0 :             heap_xlog_cleanup_info(record);
    8867           0 :             break;
    8868         480 :         case XLOG_HEAP2_VISIBLE:
    8869         480 :             heap_xlog_visible(record);
    8870         480 :             break;
    8871          16 :         case XLOG_HEAP2_MULTI_INSERT:
    8872          16 :             heap_xlog_multi_insert(record);
    8873          16 :             break;
    8874           0 :         case XLOG_HEAP2_LOCK_UPDATED:
    8875           0 :             heap_xlog_lock_updated(record);
    8876           0 :             break;
    8877           2 :         case XLOG_HEAP2_NEW_CID:
    8878             : 
    8879             :             /*
    8880             :              * Nothing to do on a real replay, only used during logical
    8881             :              * decoding.
    8882             :              */
    8883           2 :             break;
    8884           0 :         case XLOG_HEAP2_REWRITE:
    8885           0 :             heap_xlog_logical_rewrite(record);
    8886           0 :             break;
    8887           0 :         default:
    8888           0 :             elog(PANIC, "heap2_redo: unknown op code %u", info);
    8889             :     }
    8890        2356 : }
    8891             : 
    8892             : /*
    8893             :  * Mask a heap page before performing consistency checks on it.
    8894             :  */
    8895             : void
    8896           0 : heap_mask(char *pagedata, BlockNumber blkno)
    8897             : {
    8898           0 :     Page        page = (Page) pagedata;
    8899             :     OffsetNumber off;
    8900             : 
    8901           0 :     mask_page_lsn_and_checksum(page);
    8902             : 
    8903           0 :     mask_page_hint_bits(page);
    8904           0 :     mask_unused_space(page);
    8905             : 
    8906           0 :     for (off = 1; off <= PageGetMaxOffsetNumber(page); off++)
    8907             :     {
    8908           0 :         ItemId      iid = PageGetItemId(page, off);
    8909             :         char       *page_item;
    8910             : 
    8911           0 :         page_item = (char *) (page + ItemIdGetOffset(iid));
    8912             : 
    8913           0 :         if (ItemIdIsNormal(iid))
    8914             :         {
    8915           0 :             HeapTupleHeader page_htup = (HeapTupleHeader) page_item;
    8916             : 
    8917             :             /*
    8918             :              * If xmin of a tuple is not yet frozen, we should ignore
    8919             :              * differences in hint bits, since they can be set without
    8920             :              * emitting WAL.
    8921             :              */
    8922           0 :             if (!HeapTupleHeaderXminFrozen(page_htup))
    8923           0 :                 page_htup->t_infomask &= ~HEAP_XACT_MASK;
    8924             :             else
    8925             :             {
    8926             :                 /* Still we need to mask xmax hint bits. */
    8927           0 :                 page_htup->t_infomask &= ~HEAP_XMAX_INVALID;
    8928           0 :                 page_htup->t_infomask &= ~HEAP_XMAX_COMMITTED;
    8929             :             }
    8930             : 
    8931             :             /*
    8932             :              * During replay, we set Command Id to FirstCommandId. Hence, mask
    8933             :              * it. See heap_xlog_insert() for details.
    8934             :              */
    8935           0 :             page_htup->t_choice.t_heap.t_field3.t_cid = MASK_MARKER;
    8936             : 
    8937             :             /*
    8938             :              * For a speculative tuple, heap_insert() does not set ctid in the
    8939             :              * caller-passed heap tuple itself, leaving the ctid field to
    8940             :              * contain a speculative token value - a per-backend monotonically
    8941             :              * increasing identifier. Besides, it does not WAL-log ctid under
    8942             :              * any circumstances.
    8943             :              *
    8944             :              * During redo, heap_xlog_insert() sets t_ctid to current block
    8945             :              * number and self offset number. It doesn't care about any
    8946             :              * speculative insertions in master. Hence, we set t_ctid to
    8947             :              * current block number and self offset number to ignore any
    8948             :              * inconsistency.
    8949             :              */
    8950           0 :             if (HeapTupleHeaderIsSpeculative(page_htup))
    8951           0 :                 ItemPointerSet(&page_htup->t_ctid, blkno, off);
    8952             : 
    8953             :             /*
    8954             :              * NB: Not ignoring ctid changes due to the tuple having moved
    8955             :              * (i.e. HeapTupleHeaderIndicatesMovedPartitions), because that's
    8956             :              * important information that needs to be in-sync between primary
    8957             :              * and standby, and thus is WAL logged.
    8958             :              */
    8959             :         }
    8960             : 
    8961             :         /*
    8962             :          * Ignore any padding bytes after the tuple, when the length of the
    8963             :          * item is not MAXALIGNed.
    8964             :          */
    8965           0 :         if (ItemIdHasStorage(iid))
    8966             :         {
    8967           0 :             int         len = ItemIdGetLength(iid);
    8968           0 :             int         padlen = MAXALIGN(len) - len;
    8969             : 
    8970           0 :             if (padlen > 0)
    8971           0 :                 memset(page_item + len, MASK_MARKER, padlen);
    8972             :         }
    8973             :     }
    8974           0 : }
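
heap_mask() exists to support WAL consistency checking: after a record is replayed, the reconstructed page and the full-page image carried with the record are both masked and then compared byte for byte, so fields that can legitimately differ without WAL (hint bits, command ids, unused space, alignment padding) do not raise false alarms. A simplified, standalone sketch of that mask-then-compare pattern (pages_consistent is hypothetical; the real comparison lives in the generic redo machinery, not in heapam.c):

    static bool
    pages_consistent(const char *replayed, const char *backup_image, BlockNumber blkno)
    {
        char        a[BLCKSZ];
        char        b[BLCKSZ];

        memcpy(a, replayed, BLCKSZ);
        memcpy(b, backup_image, BLCKSZ);

        /* neutralize hint bits, cids, unused space and padding on both copies */
        heap_mask(a, blkno);
        heap_mask(b, blkno);

        return memcmp(a, b, BLCKSZ) == 0;
    }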
    8975             : 
    8976             : /*
    8977             :  * HeapCheckForSerializableConflictOut
    8978             :  *      We are reading a tuple which has been modified.  If it is visible to
    8979             :  *      us but has been deleted, that indicates a rw-conflict out.  If it's
    8980             :  *      not visible and was created by a concurrent (overlapping)
    8981             :  *      serializable transaction, that is also a rw-conflict out,
    8982             :  *      serializable transaction, that is also a rw-conflict out.
    8983             :  * We will determine the top level xid of the writing transaction with which
    8984             :  * we may be in conflict, and check for overlap with our own transaction.
    8985             :  * If the transactions overlap (i.e., they cannot see each other's writes),
    8986             :  * then we have a conflict out.
    8987             :  *
    8988             :  * This function should be called just about anywhere in heapam.c where a
    8989             :  * tuple has been read. The caller must hold at least a shared lock on the
    8990             :  * buffer, because this function might set hint bits on the tuple. There is
    8991             :  * currently no known reason to call this function from an index AM.
    8992             :  */
    8993             : void
    8994   265050810 : HeapCheckForSerializableConflictOut(bool visible, Relation relation,
    8995             :                                     HeapTuple tuple, Buffer buffer,
    8996             :                                     Snapshot snapshot)
    8997             : {
    8998             :     TransactionId xid;
    8999             :     HTSV_Result htsvResult;
    9000             : 
    9001   265050810 :     if (!CheckForSerializableConflictOutNeeded(relation, snapshot))
    9002   265000996 :         return;
    9003             : 
    9004             :     /*
    9005             :      * Check whether the tuple has been written by a concurrent transaction,
    9006             :      * either to create it (so it is not visible to us) or to delete it
    9007             :      * (while it is visible to us).  The "visible" bool indicates whether the
    9008             :      * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
    9009             :      * is going on with it.
    9010             :      */
    9011       49814 :     htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
    9012       49814 :     switch (htsvResult)
    9013             :     {
    9014       48290 :         case HEAPTUPLE_LIVE:
    9015       48290 :             if (visible)
    9016       48264 :                 return;
    9017          26 :             xid = HeapTupleHeaderGetXmin(tuple->t_data);
    9018          26 :             break;
    9019          28 :         case HEAPTUPLE_RECENTLY_DEAD:
    9020          28 :             if (!visible)
    9021          10 :                 return;
    9022          18 :             xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
    9023          18 :             break;
    9024         656 :         case HEAPTUPLE_DELETE_IN_PROGRESS:
    9025         656 :             xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
    9026         656 :             break;
    9027         616 :         case HEAPTUPLE_INSERT_IN_PROGRESS:
    9028         616 :             xid = HeapTupleHeaderGetXmin(tuple->t_data);
    9029         616 :             break;
    9030         224 :         case HEAPTUPLE_DEAD:
    9031         224 :             return;
    9032           0 :         default:
    9033             : 
    9034             :             /*
    9035             :              * The only way to get to this default clause is if a new value is
    9036             :              * added to the enum type without adding it to this switch
    9037             :              * statement.  That's a bug, so elog.
    9038             :              */
    9039           0 :             elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
    9040             : 
    9041             :             /*
    9042             :              * In spite of having all enum values covered and calling elog on
    9043             :              * this default, some compilers think this is a code path which
    9044             :              * allows xid to be used below without initialization. Silence
    9045             :              * that warning.
    9046             :              */
    9047             :             xid = InvalidTransactionId;
    9048             :     }
    9049             : 
    9050             :     Assert(TransactionIdIsValid(xid));
    9051             :     Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
    9052             : 
    9053             :     /*
    9054             :      * Find top level xid.  Bail out if xid is too early to be a conflict, or
    9055             :      * if it's our own xid.
    9056             :      */
    9057        1316 :     if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
    9058         226 :         return;
    9059        1090 :     xid = SubTransGetTopmostTransaction(xid);
    9060        1090 :     if (TransactionIdPrecedes(xid, TransactionXmin))
    9061           0 :         return;
    9062             : 
    9063        1090 :     CheckForSerializableConflictOut(relation, xid, snapshot);
    9064             : }
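
As the header comment above notes, callers test visibility and report a possible rw-conflict while holding at least a share lock on the buffer, since both steps may set hint bits on the tuple. A minimal sketch of that calling pattern (example_fetch_tuple is hypothetical; heap_fetch and the sequential-scan code follow essentially this shape):

    static bool
    example_fetch_tuple(Relation relation, Buffer buffer, HeapTuple tuple,
                        Snapshot snapshot)
    {
        bool        valid;

        LockBuffer(buffer, BUFFER_LOCK_SHARE);

        /* visibility check may set hint bits, hence the shared lock */
        valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);

        /* report a possible rw-conflict out whether or not the tuple is visible */
        HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);

        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

        return valid;
    }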

Generated by: LCOV version 1.13