LCOV - code coverage report
Current view: top level - src/backend/access/heap - pruneheap.c (source / functions)
Test: PostgreSQL 19devel
Date: 2025-10-16 02:17:52
                  Hit    Total  Coverage
Lines:            490      518    94.6 %
Functions:         23       23   100.0 %
Legend: Lines: hit | not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * pruneheap.c
       4             :  *    heap page pruning and HOT-chain management code
       5             :  *
       6             :  * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/access/heap/pruneheap.c
      12             :  *
      13             :  *-------------------------------------------------------------------------
      14             :  */
      15             : #include "postgres.h"
      16             : 
      17             : #include "access/heapam.h"
      18             : #include "access/heapam_xlog.h"
      19             : #include "access/htup_details.h"
      20             : #include "access/multixact.h"
      21             : #include "access/transam.h"
      22             : #include "access/visibilitymapdefs.h"
      23             : #include "access/xlog.h"
      24             : #include "access/xloginsert.h"
      25             : #include "commands/vacuum.h"
      26             : #include "executor/instrument.h"
      27             : #include "miscadmin.h"
      28             : #include "pgstat.h"
      29             : #include "storage/bufmgr.h"
      30             : #include "utils/rel.h"
      31             : #include "utils/snapmgr.h"
      32             : 
      33             : /* Working data for heap_page_prune_and_freeze() and subroutines */
      34             : typedef struct
      35             : {
      36             :     /*-------------------------------------------------------
      37             :      * Arguments passed to heap_page_prune_and_freeze()
      38             :      *-------------------------------------------------------
      39             :      */
      40             : 
      41             :     /* tuple visibility test, initialized for the relation */
      42             :     GlobalVisState *vistest;
      43             :     /* whether or not dead items can be set LP_UNUSED during pruning */
      44             :     bool        mark_unused_now;
      45             :     /* whether to attempt freezing tuples */
      46             :     bool        attempt_freeze;
      47             :     struct VacuumCutoffs *cutoffs;
      48             : 
      49             :     /*-------------------------------------------------------
      50             :      * Fields describing what to do to the page
      51             :      *-------------------------------------------------------
      52             :      */
      53             :     TransactionId new_prune_xid;    /* new prune hint value */
      54             :     TransactionId latest_xid_removed;
      55             :     int         nredirected;    /* numbers of entries in arrays below */
      56             :     int         ndead;
      57             :     int         nunused;
      58             :     int         nfrozen;
      59             :     /* arrays that accumulate indexes of items to be changed */
      60             :     OffsetNumber redirected[MaxHeapTuplesPerPage * 2];
      61             :     OffsetNumber nowdead[MaxHeapTuplesPerPage];
      62             :     OffsetNumber nowunused[MaxHeapTuplesPerPage];
      63             :     HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
      64             : 
      65             :     /*-------------------------------------------------------
      66             :      * Working state for HOT chain processing
      67             :      *-------------------------------------------------------
      68             :      */
      69             : 
      70             :     /*
      71             :      * 'root_items' contains offsets of all LP_REDIRECT line pointers and
      72             :      * normal non-HOT tuples.  They can be stand-alone items or the first item
      73             :      * in a HOT chain.  'heaponly_items' contains heap-only tuples which can
      74             :      * only be removed as part of a HOT chain.
      75             :      */
      76             :     int         nroot_items;
      77             :     OffsetNumber root_items[MaxHeapTuplesPerPage];
      78             :     int         nheaponly_items;
      79             :     OffsetNumber heaponly_items[MaxHeapTuplesPerPage];
      80             : 
      81             :     /*
      82             :      * processed[offnum] is true if item at offnum has been processed.
      83             :      *
       84             :      * This needs to be MaxHeapTuplesPerPage + 1 entries long, since
       85             :      * FirstOffsetNumber is 1; otherwise every access would need to subtract 1.
      86             :      */
      87             :     bool        processed[MaxHeapTuplesPerPage + 1];
      88             : 
      89             :     /*
      90             :      * Tuple visibility is only computed once for each tuple, for correctness
      91             :      * and efficiency reasons; see comment in heap_page_prune_and_freeze() for
      92             :      * details.  This is of type int8[], instead of HTSV_Result[], so we can
      93             :      * use -1 to indicate no visibility has been computed, e.g. for LP_DEAD
      94             :      * items.
      95             :      *
       96             :      * This needs to be MaxHeapTuplesPerPage + 1 entries long, since
       97             :      * FirstOffsetNumber is 1; otherwise every access would need to subtract 1.
      98             :      */
      99             :     int8        htsv[MaxHeapTuplesPerPage + 1];
     100             : 
     101             :     /*
     102             :      * Freezing-related state.
     103             :      */
     104             :     HeapPageFreeze pagefrz;
     105             : 
     106             :     /*-------------------------------------------------------
     107             :      * Information about what was done
     108             :      *
     109             :      * These fields are not used by pruning itself for the most part, but are
     110             :      * used to collect information about what was pruned and what state the
     111             :      * page is in after pruning, for the benefit of the caller.  They are
     112             :      * copied to the caller's PruneFreezeResult at the end.
     113             :      * -------------------------------------------------------
     114             :      */
     115             : 
     116             :     int         ndeleted;       /* Number of tuples deleted from the page */
     117             : 
     118             :     /* Number of live and recently dead tuples, after pruning */
     119             :     int         live_tuples;
     120             :     int         recently_dead_tuples;
     121             : 
     122             :     /* Whether or not the page makes rel truncation unsafe */
     123             :     bool        hastup;
     124             : 
     125             :     /*
     126             :      * LP_DEAD items on the page after pruning.  Includes existing LP_DEAD
     127             :      * items
     128             :      */
     129             :     int         lpdead_items;   /* number of items in the array */
     130             :     OffsetNumber *deadoffsets;  /* points directly to presult->deadoffsets */
     131             : 
     132             :     /*
     133             :      * all_visible and all_frozen indicate if the all-visible and all-frozen
     134             :      * bits in the visibility map can be set for this page after pruning.
     135             :      *
     136             :      * visibility_cutoff_xid is the newest xmin of live tuples on the page.
     137             :      * The caller can use it as the conflict horizon, when setting the VM
     138             :      * bits.  It is only valid if we froze some tuples, and all_frozen is
     139             :      * true.
     140             :      *
     141             :      * NOTE: all_visible and all_frozen don't include LP_DEAD items.  That's
     142             :      * convenient for heap_page_prune_and_freeze(), to use them to decide
     143             :      * whether to freeze the page or not.  The all_visible and all_frozen
     144             :      * values returned to the caller are adjusted to include LP_DEAD items at
     145             :      * the end.
     146             :      *
     147             :      * all_frozen should only be considered valid if all_visible is also set;
     148             :      * we don't bother to clear the all_frozen flag every time we clear the
     149             :      * all_visible flag.
     150             :      */
     151             :     bool        all_visible;
     152             :     bool        all_frozen;
     153             :     TransactionId visibility_cutoff_xid;
     154             : } PruneState;
     155             : 
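/*
 * [Editor's illustrative sketch -- not part of pruneheap.c.]  The arrays in
 * PruneState that are indexed by OffsetNumber (processed[], htsv[]) reserve
 * slot 0, since FirstOffsetNumber is 1.  A minimal sketch of how the cached
 * visibility status might be consulted, assuming -1 still means "no status
 * computed" (e.g. for an LP_DEAD item); the function name and the
 * PRUNEHEAP_EDITOR_EXAMPLE guard are hypothetical:
 */
#ifdef PRUNEHEAP_EDITOR_EXAMPLE
static bool
example_cached_status_is_dead(const PruneState *prstate, OffsetNumber offnum)
{
    /* No status was cached for this item (e.g. it was LP_DEAD/LP_UNUSED) */
    if (prstate->htsv[offnum] == -1)
        return false;

    /* Otherwise the int8 value is a valid HTSV_Result */
    return prstate->htsv[offnum] == HEAPTUPLE_DEAD;
}
#endif
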
     156             : /* Local functions */
     157             : static HTSV_Result heap_prune_satisfies_vacuum(PruneState *prstate,
     158             :                                                HeapTuple tup,
     159             :                                                Buffer buffer);
     160             : static inline HTSV_Result htsv_get_valid_status(int status);
     161             : static void heap_prune_chain(Page page, BlockNumber blockno, OffsetNumber maxoff,
     162             :                              OffsetNumber rootoffnum, PruneState *prstate);
     163             : static void heap_prune_record_prunable(PruneState *prstate, TransactionId xid);
     164             : static void heap_prune_record_redirect(PruneState *prstate,
     165             :                                        OffsetNumber offnum, OffsetNumber rdoffnum,
     166             :                                        bool was_normal);
     167             : static void heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum,
     168             :                                    bool was_normal);
     169             : static void heap_prune_record_dead_or_unused(PruneState *prstate, OffsetNumber offnum,
     170             :                                              bool was_normal);
     171             : static void heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum, bool was_normal);
     172             : 
     173             : static void heap_prune_record_unchanged_lp_unused(Page page, PruneState *prstate, OffsetNumber offnum);
     174             : static void heap_prune_record_unchanged_lp_normal(Page page, PruneState *prstate, OffsetNumber offnum);
     175             : static void heap_prune_record_unchanged_lp_dead(Page page, PruneState *prstate, OffsetNumber offnum);
     176             : static void heap_prune_record_unchanged_lp_redirect(PruneState *prstate, OffsetNumber offnum);
     177             : 
     178             : static void page_verify_redirects(Page page);
     179             : 
     180             : static bool heap_page_will_freeze(Relation relation, Buffer buffer,
     181             :                                   bool did_tuple_hint_fpi, bool do_prune, bool do_hint_prune,
     182             :                                   PruneState *prstate);
     183             : 
     184             : 
     185             : /*
     186             :  * Optionally prune and repair fragmentation in the specified page.
     187             :  *
     188             :  * This is an opportunistic function.  It will perform housekeeping
     189             :  * only if the page heuristically looks like a candidate for pruning and we
     190             :  * can acquire buffer cleanup lock without blocking.
     191             :  *
     192             :  * Note: this is called quite often.  It's important that it fall out quickly
     193             :  * if there's not any use in pruning.
     194             :  *
     195             :  * Caller must have pin on the buffer, and must *not* have a lock on it.
     196             :  */
     197             : void
     198    32322330 : heap_page_prune_opt(Relation relation, Buffer buffer)
     199             : {
     200    32322330 :     Page        page = BufferGetPage(buffer);
     201             :     TransactionId prune_xid;
     202             :     GlobalVisState *vistest;
     203             :     Size        minfree;
     204             : 
     205             :     /*
     206             :      * We can't write WAL in recovery mode, so there's no point trying to
     207             :      * clean the page. The primary will likely issue a cleaning WAL record
     208             :      * soon anyway, so this is no particular loss.
     209             :      */
     210    32322330 :     if (RecoveryInProgress())
     211      418380 :         return;
     212             : 
     213             :     /*
      214             :      * First check whether there's any chance there's something to prune;
      215             :      * determining the appropriate horizon is a waste if there's no prune_xid
      216             :      * (i.e. no updates/deletes have left potentially dead tuples around).
     217             :      */
     218    31903950 :     prune_xid = ((PageHeader) page)->pd_prune_xid;
     219    31903950 :     if (!TransactionIdIsValid(prune_xid))
     220    16436388 :         return;
     221             : 
     222             :     /*
     223             :      * Check whether prune_xid indicates that there may be dead rows that can
     224             :      * be cleaned up.
     225             :      */
     226    15467562 :     vistest = GlobalVisTestFor(relation);
     227             : 
     228    15467562 :     if (!GlobalVisTestIsRemovableXid(vistest, prune_xid))
     229    12950392 :         return;
     230             : 
     231             :     /*
     232             :      * We prune when a previous UPDATE failed to find enough space on the page
     233             :      * for a new tuple version, or when free space falls below the relation's
     234             :      * fill-factor target (but not less than 10%).
     235             :      *
     236             :      * Checking free space here is questionable since we aren't holding any
     237             :      * lock on the buffer; in the worst case we could get a bogus answer. It's
     238             :      * unlikely to be *seriously* wrong, though, since reading either pd_lower
     239             :      * or pd_upper is probably atomic.  Avoiding taking a lock seems more
     240             :      * important than sometimes getting a wrong answer in what is after all
     241             :      * just a heuristic estimate.
     242             :      */
     243     2517170 :     minfree = RelationGetTargetPageFreeSpace(relation,
     244             :                                              HEAP_DEFAULT_FILLFACTOR);
     245     2517170 :     minfree = Max(minfree, BLCKSZ / 10);
     246             : 
     247     2517170 :     if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
     248             :     {
     249             :         /* OK, try to get exclusive buffer lock */
     250       84042 :         if (!ConditionalLockBufferForCleanup(buffer))
     251         912 :             return;
     252             : 
     253             :         /*
     254             :          * Now that we have buffer lock, get accurate information about the
     255             :          * page's free space, and recheck the heuristic about whether to
     256             :          * prune.
     257             :          */
     258       83130 :         if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
     259             :         {
     260             :             OffsetNumber dummy_off_loc;
     261             :             PruneFreezeResult presult;
     262             : 
     263             :             /*
     264             :              * For now, pass mark_unused_now as false regardless of whether or
     265             :              * not the relation has indexes, since we cannot safely determine
     266             :              * that during on-access pruning with the current implementation.
     267             :              */
     268       83130 :             heap_page_prune_and_freeze(relation, buffer, vistest, 0,
     269             :                                        NULL, &presult, PRUNE_ON_ACCESS, &dummy_off_loc, NULL, NULL);
     270             : 
     271             :             /*
     272             :              * Report the number of tuples reclaimed to pgstats.  This is
     273             :              * presult.ndeleted minus the number of newly-LP_DEAD-set items.
     274             :              *
     275             :              * We derive the number of dead tuples like this to avoid totally
     276             :              * forgetting about items that were set to LP_DEAD, since they
     277             :              * still need to be cleaned up by VACUUM.  We only want to count
     278             :              * heap-only tuples that just became LP_UNUSED in our report,
       279             :              * which don't need such cleanup.
     280             :              *
     281             :              * VACUUM doesn't have to compensate in the same way when it
     282             :              * tracks ndeleted, since it will set the same LP_DEAD items to
     283             :              * LP_UNUSED separately.
     284             :              */
     285       83130 :             if (presult.ndeleted > presult.nnewlpdead)
     286       38000 :                 pgstat_update_heap_dead_tuples(relation,
     287       38000 :                                                presult.ndeleted - presult.nnewlpdead);
     288             :         }
     289             : 
     290             :         /* And release buffer lock */
     291       83130 :         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
     292             : 
     293             :         /*
     294             :          * We avoid reuse of any free space created on the page by unrelated
     295             :          * UPDATEs/INSERTs by opting to not update the FSM at this point.  The
     296             :          * free space should be reused by UPDATEs to *this* page.
     297             :          */
     298             :     }
     299             : }
     300             : 
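/*
 * [Editor's illustrative sketch -- not part of pruneheap.c.]  The locking
 * contract of heap_page_prune_opt() as seen from a hypothetical caller: the
 * buffer is pinned but not locked on entry, and the function conditionally
 * takes (and releases) the cleanup lock itself, only if it decides to prune.
 * The read path below is simplified; the function and guard names are
 * hypothetical.
 */
#ifdef PRUNEHEAP_EDITOR_EXAMPLE
static void
example_on_access_prune(Relation rel, BlockNumber blkno)
{
    Buffer      buf = ReadBuffer(rel, blkno);   /* acquires a pin, no lock */

    heap_page_prune_opt(rel, buf);              /* may prune, may do nothing */

    LockBuffer(buf, BUFFER_LOCK_SHARE);         /* lock before reading tuples */
    /* ... examine tuples on BufferGetPage(buf) ... */
    UnlockReleaseBuffer(buf);                   /* drop lock and pin */
}
#endif
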
     301             : /*
     302             :  * Decide whether to proceed with freezing according to the freeze plans
     303             :  * prepared for the given heap buffer. If freezing is chosen, this function
     304             :  * performs several pre-freeze checks.
     305             :  *
     306             :  * The values of do_prune, do_hint_prune, and did_tuple_hint_fpi must be
     307             :  * determined before calling this function.
     308             :  *
     309             :  * prstate is both an input and output parameter.
     310             :  *
     311             :  * Returns true if we should apply the freeze plans and freeze tuples on the
     312             :  * page, and false otherwise.
     313             :  */
     314             : static bool
     315     1255932 : heap_page_will_freeze(Relation relation, Buffer buffer,
     316             :                       bool did_tuple_hint_fpi,
     317             :                       bool do_prune,
     318             :                       bool do_hint_prune,
     319             :                       PruneState *prstate)
     320             : {
     321     1255932 :     bool        do_freeze = false;
     322             : 
     323             :     /*
     324             :      * If the caller specified we should not attempt to freeze any tuples,
     325             :      * validate that everything is in the right state and return.
     326             :      */
     327     1255932 :     if (!prstate->attempt_freeze)
     328             :     {
     329             :         Assert(!prstate->all_frozen && prstate->nfrozen == 0);
     330             :         Assert(prstate->lpdead_items == 0 || !prstate->all_visible);
     331       83130 :         return false;
     332             :     }
     333             : 
     334     1172802 :     if (prstate->pagefrz.freeze_required)
     335             :     {
     336             :         /*
     337             :          * heap_prepare_freeze_tuple indicated that at least one XID/MXID from
     338             :          * before FreezeLimit/MultiXactCutoff is present.  Must freeze to
     339             :          * advance relfrozenxid/relminmxid.
     340             :          */
     341       41634 :         do_freeze = true;
     342             :     }
     343             :     else
     344             :     {
     345             :         /*
     346             :          * Opportunistically freeze the page if we are generating an FPI
     347             :          * anyway and if doing so means that we can set the page all-frozen
     348             :          * afterwards (might not happen until VACUUM's final heap pass).
     349             :          *
     350             :          * XXX: Previously, we knew if pruning emitted an FPI by checking
     351             :          * pgWalUsage.wal_fpi before and after pruning.  Once the freeze and
     352             :          * prune records were combined, this heuristic couldn't be used
     353             :          * anymore.  The opportunistic freeze heuristic must be improved;
     354             :          * however, for now, try to approximate the old logic.
     355             :          */
     356     1131168 :         if (prstate->all_visible && prstate->all_frozen && prstate->nfrozen > 0)
     357             :         {
     358             :             /*
     359             :              * Freezing would make the page all-frozen.  Have already emitted
       360             :              * Freezing would make the page all-frozen.  Have we already
       361             :              * emitted an FPI, or will we do so anyway?
     362       26696 :             if (RelationNeedsWAL(relation))
     363             :             {
     364       23386 :                 if (did_tuple_hint_fpi)
     365        2146 :                     do_freeze = true;
     366       21240 :                 else if (do_prune)
     367             :                 {
     368        1812 :                     if (XLogCheckBufferNeedsBackup(buffer))
     369         980 :                         do_freeze = true;
     370             :                 }
     371       19428 :                 else if (do_hint_prune)
     372             :                 {
     373           8 :                     if (XLogHintBitIsNeeded() && XLogCheckBufferNeedsBackup(buffer))
     374           4 :                         do_freeze = true;
     375             :                 }
     376             :             }
     377             :         }
     378             :     }
     379             : 
     380     1172802 :     if (do_freeze)
     381             :     {
     382             :         /*
     383             :          * Validate the tuples we will be freezing before entering the
     384             :          * critical section.
     385             :          */
     386       44764 :         heap_pre_freeze_checks(buffer, prstate->frozen, prstate->nfrozen);
     387             :     }
     388     1128038 :     else if (prstate->nfrozen > 0)
     389             :     {
     390             :         /*
     391             :          * The page contained some tuples that were not already frozen, and we
       392             :          * chose not to freeze them now.  The page therefore won't be all-frozen.
     393             :          */
     394             :         Assert(!prstate->pagefrz.freeze_required);
     395             : 
     396       24254 :         prstate->all_frozen = false;
     397       24254 :         prstate->nfrozen = 0;    /* avoid miscounts in instrumentation */
     398             :     }
     399             :     else
     400             :     {
     401             :         /*
     402             :          * We have no freeze plans to execute.  The page might already be
     403             :          * all-frozen (perhaps only following pruning), though.  Such pages
     404             :          * can be marked all-frozen in the VM by our caller, even though none
     405             :          * of its tuples were newly frozen here.
     406             :          */
     407             :     }
     408             : 
     409     1172802 :     return do_freeze;
     410             : }
     411             : 
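/*
 * [Editor's note -- condensed restatement, not part of pruneheap.c.]  The
 * decision above boils down to roughly the following, where
 * "emits_fpi_anyway" stands in for the did_tuple_hint_fpi and
 * XLogCheckBufferNeedsBackup() checks gated by do_prune / do_hint_prune:
 *
 *   do_freeze = attempt_freeze &&
 *               (freeze_required ||
 *                (all_visible && all_frozen && nfrozen > 0 &&
 *                 RelationNeedsWAL(relation) && emits_fpi_anyway));
 */
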
     412             : 
     413             : /*
     414             :  * Prune and repair fragmentation and potentially freeze tuples on the
     415             :  * specified page.
     416             :  *
     417             :  * Caller must have pin and buffer cleanup lock on the page.  Note that we
     418             :  * don't update the FSM information for page on caller's behalf.  Caller might
     419             :  * also need to account for a reduction in the length of the line pointer
     420             :  * array following array truncation by us.
     421             :  *
       422             :  * If the HEAP_PAGE_PRUNE_FREEZE option is set, we will also freeze tuples if
       423             :  * it's required in order to advance relfrozenxid / relminmxid, or if it's
       424             :  * considered advantageous for overall system performance to do so now.  The
       425             :  * 'cutoffs', 'presult', 'new_relfrozen_xid' and 'new_relmin_mxid' arguments
       426             :  * are required when freezing.  When the HEAP_PAGE_PRUNE_FREEZE option is set,
       427             :  * we also set presult->all_visible and presult->all_frozen on exit, to
       428             :  * indicate if the VM bits can be set.  They are always set to false when the
       429             :  * HEAP_PAGE_PRUNE_FREEZE option is not set, because at the moment only
       430             :  * callers that also freeze need that information.
     431             :  *
     432             :  * vistest is used to distinguish whether tuples are DEAD or RECENTLY_DEAD
     433             :  * (see heap_prune_satisfies_vacuum).
     434             :  *
     435             :  * options:
     436             :  *   MARK_UNUSED_NOW indicates that dead items can be set LP_UNUSED during
     437             :  *   pruning.
     438             :  *
     439             :  *   FREEZE indicates that we will also freeze tuples, and will return
     440             :  *   'all_visible', 'all_frozen' flags to the caller.
     441             :  *
     442             :  * cutoffs contains the freeze cutoffs, established by VACUUM at the beginning
       443             :  * of vacuuming the relation.  Required if the HEAP_PAGE_PRUNE_FREEZE option is set.
     444             :  * cutoffs->OldestXmin is also used to determine if dead tuples are
     445             :  * HEAPTUPLE_RECENTLY_DEAD or HEAPTUPLE_DEAD.
     446             :  *
     447             :  * presult contains output parameters needed by callers, such as the number of
     448             :  * tuples removed and the offsets of dead items on the page after pruning.
     449             :  * heap_page_prune_and_freeze() is responsible for initializing it.  Required
     450             :  * by all callers.
     451             :  *
     452             :  * reason indicates why the pruning is performed.  It is included in the WAL
     453             :  * record for debugging and analysis purposes, but otherwise has no effect.
     454             :  *
       455             :  * off_loc points to where the offset of the tuple currently being processed
       456             :  * is stored, for use in the caller's error context callback.
     457             :  *
       458             :  * new_relfrozen_xid and new_relmin_mxid must be provided by the caller if the
       459             :  * HEAP_PAGE_PRUNE_FREEZE option is set.  On entry, they contain the oldest
       460             :  * XID and multi-XID seen on the relation so far.  They will be updated with
       461             :  * the oldest values present on the page after pruning.  After processing the
       462             :  * whole relation, VACUUM can use these values as the new
       463             :  * relfrozenxid/relminmxid for the relation.
     464             :  */
     465             : void
     466     1255932 : heap_page_prune_and_freeze(Relation relation, Buffer buffer,
     467             :                            GlobalVisState *vistest,
     468             :                            int options,
     469             :                            struct VacuumCutoffs *cutoffs,
     470             :                            PruneFreezeResult *presult,
     471             :                            PruneReason reason,
     472             :                            OffsetNumber *off_loc,
     473             :                            TransactionId *new_relfrozen_xid,
     474             :                            MultiXactId *new_relmin_mxid)
     475             : {
     476     1255932 :     Page        page = BufferGetPage(buffer);
     477     1255932 :     BlockNumber blockno = BufferGetBlockNumber(buffer);
     478             :     OffsetNumber offnum,
     479             :                 maxoff;
     480             :     PruneState  prstate;
     481             :     HeapTupleData tup;
     482             :     bool        do_freeze;
     483             :     bool        do_prune;
     484             :     bool        do_hint_prune;
     485             :     bool        did_tuple_hint_fpi;
     486     1255932 :     int64       fpi_before = pgWalUsage.wal_fpi;
     487             : 
     488             :     /* Copy parameters to prstate */
     489     1255932 :     prstate.vistest = vistest;
     490     1255932 :     prstate.mark_unused_now = (options & HEAP_PAGE_PRUNE_MARK_UNUSED_NOW) != 0;
     491     1255932 :     prstate.attempt_freeze = (options & HEAP_PAGE_PRUNE_FREEZE) != 0;
     492     1255932 :     prstate.cutoffs = cutoffs;
     493             : 
     494             :     /*
     495             :      * Our strategy is to scan the page and make lists of items to change,
     496             :      * then apply the changes within a critical section.  This keeps as much
     497             :      * logic as possible out of the critical section, and also ensures that
     498             :      * WAL replay will work the same as the normal case.
     499             :      *
     500             :      * First, initialize the new pd_prune_xid value to zero (indicating no
     501             :      * prunable tuples).  If we find any tuples which may soon become
     502             :      * prunable, we will save the lowest relevant XID in new_prune_xid. Also
     503             :      * initialize the rest of our working state.
     504             :      */
     505     1255932 :     prstate.new_prune_xid = InvalidTransactionId;
     506     1255932 :     prstate.latest_xid_removed = InvalidTransactionId;
     507     1255932 :     prstate.nredirected = prstate.ndead = prstate.nunused = prstate.nfrozen = 0;
     508     1255932 :     prstate.nroot_items = 0;
     509     1255932 :     prstate.nheaponly_items = 0;
     510             : 
     511             :     /* initialize page freezing working state */
     512     1255932 :     prstate.pagefrz.freeze_required = false;
     513     1255932 :     if (prstate.attempt_freeze)
     514             :     {
     515             :         Assert(new_relfrozen_xid && new_relmin_mxid);
     516     1172802 :         prstate.pagefrz.FreezePageRelfrozenXid = *new_relfrozen_xid;
     517     1172802 :         prstate.pagefrz.NoFreezePageRelfrozenXid = *new_relfrozen_xid;
     518     1172802 :         prstate.pagefrz.FreezePageRelminMxid = *new_relmin_mxid;
     519     1172802 :         prstate.pagefrz.NoFreezePageRelminMxid = *new_relmin_mxid;
     520             :     }
     521             :     else
     522             :     {
     523             :         Assert(new_relfrozen_xid == NULL && new_relmin_mxid == NULL);
     524       83130 :         prstate.pagefrz.FreezePageRelminMxid = InvalidMultiXactId;
     525       83130 :         prstate.pagefrz.NoFreezePageRelminMxid = InvalidMultiXactId;
     526       83130 :         prstate.pagefrz.FreezePageRelfrozenXid = InvalidTransactionId;
     527       83130 :         prstate.pagefrz.NoFreezePageRelfrozenXid = InvalidTransactionId;
     528             :     }
     529             : 
     530     1255932 :     prstate.ndeleted = 0;
     531     1255932 :     prstate.live_tuples = 0;
     532     1255932 :     prstate.recently_dead_tuples = 0;
     533     1255932 :     prstate.hastup = false;
     534     1255932 :     prstate.lpdead_items = 0;
     535     1255932 :     prstate.deadoffsets = presult->deadoffsets;
     536             : 
     537             :     /*
     538             :      * Caller may update the VM after we're done.  We can keep track of
     539             :      * whether the page will be all-visible and all-frozen after pruning and
     540             :      * freezing to help the caller to do that.
     541             :      *
      542             :      * Currently, only VACUUM sets the VM bits.  To save effort, only do the
      543             :      * bookkeeping if the caller needs it.  At the moment that's tied to
      544             :      * HEAP_PAGE_PRUNE_FREEZE, but it could become a separate flag if a caller
      545             :      * wanted to update the VM bits without also freezing, or to freeze
      546             :      * without also setting the VM bits.
     547             :      *
     548             :      * In addition to telling the caller whether it can set the VM bit, we
     549             :      * also use 'all_visible' and 'all_frozen' for our own decision-making. If
     550             :      * the whole page would become frozen, we consider opportunistically
     551             :      * freezing tuples.  We will not be able to freeze the whole page if there
     552             :      * are tuples present that are not visible to everyone or if there are
     553             :      * dead tuples which are not yet removable.  However, dead tuples which
     554             :      * will be removed by the end of vacuuming should not preclude us from
     555             :      * opportunistically freezing.  Because of that, we do not clear
     556             :      * all_visible when we see LP_DEAD items.  We fix that at the end of the
     557             :      * function, when we return the value to the caller, so that the caller
     558             :      * doesn't set the VM bit incorrectly.
     559             :      */
     560     1255932 :     if (prstate.attempt_freeze)
     561             :     {
     562     1172802 :         prstate.all_visible = true;
     563     1172802 :         prstate.all_frozen = true;
     564             :     }
     565             :     else
     566             :     {
     567             :         /*
     568             :          * Initializing to false allows skipping the work to update them in
     569             :          * heap_prune_record_unchanged_lp_normal().
     570             :          */
     571       83130 :         prstate.all_visible = false;
     572       83130 :         prstate.all_frozen = false;
     573             :     }
     574             : 
     575             :     /*
     576             :      * The visibility cutoff xid is the newest xmin of live tuples on the
     577             :      * page.  In the common case, this will be set as the conflict horizon the
      578             :      * caller can use for updating the VM.  If, at the end of freezing and
      579             :      * pruning, the page is all-frozen, then every running transaction on the
      580             :      * standby must already see all tuples on the page as visible, so the
      581             :      * conflict horizon remains InvalidTransactionId.
     582             :      */
     583     1255932 :     prstate.visibility_cutoff_xid = InvalidTransactionId;
     584             : 
     585     1255932 :     maxoff = PageGetMaxOffsetNumber(page);
     586     1255932 :     tup.t_tableOid = RelationGetRelid(relation);
     587             : 
     588             :     /*
     589             :      * Determine HTSV for all tuples, and queue them up for processing as HOT
     590             :      * chain roots or as heap-only items.
     591             :      *
     592             :      * Determining HTSV only once for each tuple is required for correctness,
     593             :      * to deal with cases where running HTSV twice could result in different
     594             :      * results.  For example, RECENTLY_DEAD can turn to DEAD if another
     595             :      * checked item causes GlobalVisTestIsRemovableFullXid() to update the
     596             :      * horizon, or INSERT_IN_PROGRESS can change to DEAD if the inserting
     597             :      * transaction aborts.
     598             :      *
     599             :      * It's also good for performance. Most commonly tuples within a page are
     600             :      * stored at decreasing offsets (while the items are stored at increasing
     601             :      * offsets). When processing all tuples on a page this leads to reading
     602             :      * memory at decreasing offsets within a page, with a variable stride.
     603             :      * That's hard for CPU prefetchers to deal with. Processing the items in
     604             :      * reverse order (and thus the tuples in increasing order) increases
     605             :      * prefetching efficiency significantly / decreases the number of cache
     606             :      * misses.
     607             :      */
     608     1255932 :     for (offnum = maxoff;
     609    60072484 :          offnum >= FirstOffsetNumber;
     610    58816552 :          offnum = OffsetNumberPrev(offnum))
     611             :     {
     612    58816552 :         ItemId      itemid = PageGetItemId(page, offnum);
     613             :         HeapTupleHeader htup;
     614             : 
     615             :         /*
     616             :          * Set the offset number so that we can display it along with any
     617             :          * error that occurred while processing this tuple.
     618             :          */
     619    58816552 :         *off_loc = offnum;
     620             : 
     621    58816552 :         prstate.processed[offnum] = false;
     622    58816552 :         prstate.htsv[offnum] = -1;
     623             : 
     624             :         /* Nothing to do if slot doesn't contain a tuple */
     625    58816552 :         if (!ItemIdIsUsed(itemid))
     626             :         {
     627      575018 :             heap_prune_record_unchanged_lp_unused(page, &prstate, offnum);
     628      575018 :             continue;
     629             :         }
     630             : 
     631    58241534 :         if (ItemIdIsDead(itemid))
     632             :         {
     633             :             /*
     634             :              * If the caller set mark_unused_now true, we can set dead line
     635             :              * pointers LP_UNUSED now.
     636             :              */
     637     1908034 :             if (unlikely(prstate.mark_unused_now))
     638           0 :                 heap_prune_record_unused(&prstate, offnum, false);
     639             :             else
     640     1908034 :                 heap_prune_record_unchanged_lp_dead(page, &prstate, offnum);
     641     1908034 :             continue;
     642             :         }
     643             : 
     644    56333500 :         if (ItemIdIsRedirected(itemid))
     645             :         {
     646             :             /* This is the start of a HOT chain */
     647      601558 :             prstate.root_items[prstate.nroot_items++] = offnum;
     648      601558 :             continue;
     649             :         }
     650             : 
     651             :         Assert(ItemIdIsNormal(itemid));
     652             : 
     653             :         /*
     654             :          * Get the tuple's visibility status and queue it up for processing.
     655             :          */
     656    55731942 :         htup = (HeapTupleHeader) PageGetItem(page, itemid);
     657    55731942 :         tup.t_data = htup;
     658    55731942 :         tup.t_len = ItemIdGetLength(itemid);
     659    55731942 :         ItemPointerSet(&tup.t_self, blockno, offnum);
     660             : 
     661    55731942 :         prstate.htsv[offnum] = heap_prune_satisfies_vacuum(&prstate, &tup,
     662             :                                                            buffer);
     663             : 
     664    55731942 :         if (!HeapTupleHeaderIsHeapOnly(htup))
     665    54893528 :             prstate.root_items[prstate.nroot_items++] = offnum;
     666             :         else
     667      838414 :             prstate.heaponly_items[prstate.nheaponly_items++] = offnum;
     668             :     }
     669             : 
     670             :     /*
     671             :      * If checksums are enabled, heap_prune_satisfies_vacuum() may have caused
     672             :      * an FPI to be emitted.
     673             :      */
     674     1255932 :     did_tuple_hint_fpi = fpi_before != pgWalUsage.wal_fpi;
     675             : 
     676             :     /*
     677             :      * Process HOT chains.
     678             :      *
     679             :      * We added the items to the array starting from 'maxoff', so by
     680             :      * processing the array in reverse order, we process the items in
     681             :      * ascending offset number order.  The order doesn't matter for
     682             :      * correctness, but some quick micro-benchmarking suggests that this is
     683             :      * faster.  (Earlier PostgreSQL versions, which scanned all the items on
     684             :      * the page instead of using the root_items array, also did it in
     685             :      * ascending offset number order.)
     686             :      */
     687    56751018 :     for (int i = prstate.nroot_items - 1; i >= 0; i--)
     688             :     {
     689    55495086 :         offnum = prstate.root_items[i];
     690             : 
     691             :         /* Ignore items already processed as part of an earlier chain */
     692    55495086 :         if (prstate.processed[offnum])
     693           0 :             continue;
     694             : 
     695             :         /* see preceding loop */
     696    55495086 :         *off_loc = offnum;
     697             : 
     698             :         /* Process this item or chain of items */
     699    55495086 :         heap_prune_chain(page, blockno, maxoff, offnum, &prstate);
     700             :     }
     701             : 
     702             :     /*
     703             :      * Process any heap-only tuples that were not already processed as part of
     704             :      * a HOT chain.
     705             :      */
     706     2094346 :     for (int i = prstate.nheaponly_items - 1; i >= 0; i--)
     707             :     {
     708      838414 :         offnum = prstate.heaponly_items[i];
     709             : 
     710      838414 :         if (prstate.processed[offnum])
     711      811412 :             continue;
     712             : 
     713             :         /* see preceding loop */
     714       27002 :         *off_loc = offnum;
     715             : 
     716             :         /*
     717             :          * If the tuple is DEAD and doesn't chain to anything else, mark it
     718             :          * unused.  (If it does chain, we can only remove it as part of
     719             :          * pruning its chain.)
     720             :          *
     721             :          * We need this primarily to handle aborted HOT updates, that is,
     722             :          * XMIN_INVALID heap-only tuples.  Those might not be linked to by any
     723             :          * chain, since the parent tuple might be re-updated before any
     724             :          * pruning occurs.  So we have to be able to reap them separately from
     725             :          * chain-pruning.  (Note that HeapTupleHeaderIsHotUpdated will never
     726             :          * return true for an XMIN_INVALID tuple, so this code will work even
     727             :          * when there were sequential updates within the aborted transaction.)
     728             :          */
     729       27002 :         if (prstate.htsv[offnum] == HEAPTUPLE_DEAD)
     730             :         {
     731        3978 :             ItemId      itemid = PageGetItemId(page, offnum);
     732        3978 :             HeapTupleHeader htup = (HeapTupleHeader) PageGetItem(page, itemid);
     733             : 
     734        3978 :             if (likely(!HeapTupleHeaderIsHotUpdated(htup)))
     735             :             {
     736        3978 :                 HeapTupleHeaderAdvanceConflictHorizon(htup,
     737             :                                                       &prstate.latest_xid_removed);
     738        3978 :                 heap_prune_record_unused(&prstate, offnum, true);
     739             :             }
     740             :             else
     741             :             {
     742             :                 /*
     743             :                  * This tuple should've been processed and removed as part of
     744             :                  * a HOT chain, so something's wrong.  To preserve evidence,
     745             :                  * we don't dare to remove it.  We cannot leave behind a DEAD
     746             :                  * tuple either, because that will cause VACUUM to error out.
     747             :                  * Throwing an error with a distinct error message seems like
     748             :                  * the least bad option.
     749             :                  */
     750           0 :                 elog(ERROR, "dead heap-only tuple (%u, %d) is not linked to from any HOT chain",
     751             :                      blockno, offnum);
     752             :             }
     753             :         }
     754             :         else
     755       23024 :             heap_prune_record_unchanged_lp_normal(page, &prstate, offnum);
     756             :     }
     757             : 
     758             :     /* We should now have processed every tuple exactly once  */
     759             : #ifdef USE_ASSERT_CHECKING
     760             :     for (offnum = FirstOffsetNumber;
     761             :          offnum <= maxoff;
     762             :          offnum = OffsetNumberNext(offnum))
     763             :     {
     764             :         *off_loc = offnum;
     765             : 
     766             :         Assert(prstate.processed[offnum]);
     767             :     }
     768             : #endif
     769             : 
     770             :     /* Clear the offset information once we have processed the given page. */
     771     1255932 :     *off_loc = InvalidOffsetNumber;
     772             : 
     773     3734420 :     do_prune = prstate.nredirected > 0 ||
     774     2408706 :         prstate.ndead > 0 ||
     775     1152774 :         prstate.nunused > 0;
     776             : 
     777             :     /*
     778             :      * Even if we don't prune anything, if we found a new value for the
     779             :      * pd_prune_xid field or the page was marked full, we will update the hint
     780             :      * bit.
     781             :      */
     782     2408238 :     do_hint_prune = ((PageHeader) page)->pd_prune_xid != prstate.new_prune_xid ||
     783     1152306 :         PageIsFull(page);
     784             : 
     785             :     /*
     786             :      * Decide if we want to go ahead with freezing according to the freeze
     787             :      * plans we prepared, or not.
     788             :      */
     789     1255932 :     do_freeze = heap_page_will_freeze(relation, buffer,
     790             :                                       did_tuple_hint_fpi,
     791             :                                       do_prune,
     792             :                                       do_hint_prune,
     793             :                                       &prstate);
     794             : 
     795             :     /* Any error while applying the changes is critical */
     796     1255932 :     START_CRIT_SECTION();
     797             : 
     798     1255932 :     if (do_hint_prune)
     799             :     {
     800             :         /*
     801             :          * Update the page's pd_prune_xid field to either zero, or the lowest
     802             :          * XID of any soon-prunable tuple.
     803             :          */
     804      103838 :         ((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
     805             : 
     806             :         /*
     807             :          * Also clear the "page is full" flag, since there's no point in
     808             :          * repeating the prune/defrag process until something else happens to
     809             :          * the page.
     810             :          */
     811      103838 :         PageClearFull(page);
     812             : 
     813             :         /*
     814             :          * If that's all we had to do to the page, this is a non-WAL-logged
     815             :          * hint.  If we are going to freeze or prune the page, we will mark
     816             :          * the buffer dirty below.
     817             :          */
     818      103838 :         if (!do_freeze && !do_prune)
     819         392 :             MarkBufferDirtyHint(buffer, true);
     820             :     }
     821             : 
     822     1255932 :     if (do_prune || do_freeze)
     823             :     {
     824             :         /* Apply the planned item changes and repair page fragmentation. */
     825      145500 :         if (do_prune)
     826             :         {
     827      103936 :             heap_page_prune_execute(buffer, false,
     828             :                                     prstate.redirected, prstate.nredirected,
     829             :                                     prstate.nowdead, prstate.ndead,
     830             :                                     prstate.nowunused, prstate.nunused);
     831             :         }
     832             : 
     833      145500 :         if (do_freeze)
     834       44764 :             heap_freeze_prepared_tuples(buffer, prstate.frozen, prstate.nfrozen);
     835             : 
     836      145500 :         MarkBufferDirty(buffer);
     837             : 
     838             :         /*
     839             :          * Emit a WAL XLOG_HEAP2_PRUNE* record showing what we did
     840             :          */
     841      145500 :         if (RelationNeedsWAL(relation))
     842             :         {
     843             :             /*
     844             :              * The snapshotConflictHorizon for the whole record should be the
     845             :              * most conservative of all the horizons calculated for any of the
      846             :              * possible modifications.  If this record prunes tuples, then
      847             :              * any transactions on the standby older than the youngest xmax
      848             :              * among the tuples this record removes will conflict.  If this
      849             :              * record freezes tuples, then any transactions on the standby
      850             :              * with xids older than the youngest xid among the tuples this
      851             :              * record freezes will conflict.
     852             :              */
     853      143746 :             TransactionId frz_conflict_horizon = InvalidTransactionId;
     854             :             TransactionId conflict_xid;
     855             : 
     856             :             /*
     857             :              * We can use the visibility_cutoff_xid as our cutoff for
     858             :              * conflicts when the whole page is eligible to become all-frozen
     859             :              * in the VM once we're done with it.  Otherwise we generate a
     860             :              * conservative cutoff by stepping back from OldestXmin.
     861             :              */
     862      143746 :             if (do_freeze)
     863             :             {
     864       44760 :                 if (prstate.all_visible && prstate.all_frozen)
     865       40066 :                     frz_conflict_horizon = prstate.visibility_cutoff_xid;
     866             :                 else
     867             :                 {
     868             :                     /* Avoids false conflicts when hot_standby_feedback in use */
       869             :                     /* Avoids false conflicts when hot_standby_feedback is in use */
     870        4694 :                     TransactionIdRetreat(frz_conflict_horizon);
     871             :                 }
     872             :             }
     873             : 
     874      143746 :             if (TransactionIdFollows(frz_conflict_horizon, prstate.latest_xid_removed))
     875       42230 :                 conflict_xid = frz_conflict_horizon;
     876             :             else
     877      101516 :                 conflict_xid = prstate.latest_xid_removed;
     878             : 
     879      143746 :             log_heap_prune_and_freeze(relation, buffer,
     880             :                                       InvalidBuffer,    /* vmbuffer */
     881             :                                       0,    /* vmflags */
     882             :                                       conflict_xid,
     883             :                                       true, reason,
     884             :                                       prstate.frozen, prstate.nfrozen,
     885             :                                       prstate.redirected, prstate.nredirected,
     886             :                                       prstate.nowdead, prstate.ndead,
     887             :                                       prstate.nowunused, prstate.nunused);
     888             :         }
     889             :     }
     890             : 
     891     1255932 :     END_CRIT_SECTION();
     892             : 
     893             :     /* Copy information back for caller */
     894     1255932 :     presult->ndeleted = prstate.ndeleted;
     895     1255932 :     presult->nnewlpdead = prstate.ndead;
     896     1255932 :     presult->nfrozen = prstate.nfrozen;
     897     1255932 :     presult->live_tuples = prstate.live_tuples;
     898     1255932 :     presult->recently_dead_tuples = prstate.recently_dead_tuples;
     899             : 
     900             :     /*
     901             :      * It was convenient to ignore LP_DEAD items in all_visible earlier on to
     902             :      * make the choice of whether or not to freeze the page unaffected by the
     903             :      * short-term presence of LP_DEAD items.  These LP_DEAD items were
     904             :      * effectively assumed to be LP_UNUSED items in the making.  It doesn't
     905             :      * matter which vacuum heap pass (initial pass or final pass) ends up
     906             :      * setting the page all-frozen, as long as the ongoing VACUUM does it.
     907             :      *
     908             :      * Now that freezing has been finalized, unset all_visible if there are
     909             :      * any LP_DEAD items on the page.  It needs to reflect the present state
     910             :      * of the page, as expected by our caller.
     911             :      */
     912     1255932 :     if (prstate.all_visible && prstate.lpdead_items == 0)
     913             :     {
     914      411852 :         presult->all_visible = prstate.all_visible;
     915      411852 :         presult->all_frozen = prstate.all_frozen;
     916             :     }
     917             :     else
     918             :     {
     919      844080 :         presult->all_visible = false;
     920      844080 :         presult->all_frozen = false;
     921             :     }
     922             : 
     923     1255932 :     presult->hastup = prstate.hastup;
     924             : 
     925             :     /*
     926             :      * For callers planning to update the visibility map, the conflict horizon
     927             :      * for that record must be the newest xmin on the page.  However, if the
     928             :      * page is completely frozen, there can be no conflict and the
     929             :      * vm_conflict_horizon should remain InvalidTransactionId.  This includes
     930             :      * the case where we just froze all the tuples; the prune-freeze record
     931             :      * already included the conflict XID, so the caller doesn't need it.
     932             :      */
     933     1255932 :     if (presult->all_frozen)
     934      393208 :         presult->vm_conflict_horizon = InvalidTransactionId;
     935             :     else
     936      862724 :         presult->vm_conflict_horizon = prstate.visibility_cutoff_xid;
     937             : 
     938     1255932 :     presult->lpdead_items = prstate.lpdead_items;
     939             :     /* the presult->deadoffsets array was already filled in */
     940             : 
     941     1255932 :     if (prstate.attempt_freeze)
     942             :     {
     943     1172802 :         if (presult->nfrozen > 0)
     944             :         {
     945       44764 :             *new_relfrozen_xid = prstate.pagefrz.FreezePageRelfrozenXid;
     946       44764 :             *new_relmin_mxid = prstate.pagefrz.FreezePageRelminMxid;
     947             :         }
     948             :         else
     949             :         {
     950     1128038 :             *new_relfrozen_xid = prstate.pagefrz.NoFreezePageRelfrozenXid;
     951     1128038 :             *new_relmin_mxid = prstate.pagefrz.NoFreezePageRelminMxid;
     952             :         }
     953             :     }
     954     1255932 : }
     955             : 
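
The conflict-XID selection just above picks the newer of the freeze conflict
horizon and the latest xid whose tuples were removed, using PostgreSQL's
circular 32-bit xid comparison.  A minimal standalone sketch of that selection
(an illustrative model, not backend code; xid_follows() approximates
TransactionIdFollows() for normal xids only):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t TransactionId;

    /* simplified TransactionIdFollows(): modulo-2^32 ordering of normal xids */
    static int
    xid_follows(TransactionId id1, TransactionId id2)
    {
        return (int32_t) (id1 - id2) > 0;
    }

    int
    main(void)
    {
        TransactionId frz_conflict_horizon = 1000;  /* e.g. visibility cutoff */
        TransactionId latest_xid_removed = 1050;
        TransactionId conflict_xid;

        /* same shape as the branch feeding log_heap_prune_and_freeze() */
        if (xid_follows(frz_conflict_horizon, latest_xid_removed))
            conflict_xid = frz_conflict_horizon;
        else
            conflict_xid = latest_xid_removed;

        printf("conflict_xid = %u\n", (unsigned) conflict_xid);    /* 1050 */
        return 0;
    }
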
     956             : 
     957             : /*
     958             :  * Perform visibility checks for heap pruning.
     959             :  */
     960             : static HTSV_Result
     961    55731942 : heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer)
     962             : {
     963             :     HTSV_Result res;
     964             :     TransactionId dead_after;
     965             : 
     966    55731942 :     res = HeapTupleSatisfiesVacuumHorizon(tup, buffer, &dead_after);
     967             : 
     968    55731942 :     if (res != HEAPTUPLE_RECENTLY_DEAD)
     969    52357428 :         return res;
     970             : 
     971             :     /*
     972             :      * For VACUUM, we must be sure to prune tuples with xmax older than
     973             :      * OldestXmin -- a visibility cutoff determined at the beginning of
     974             :      * vacuuming the relation. OldestXmin is used for freezing determination
     975             :      * and we cannot freeze dead tuples' xmaxes.
     976             :      */
     977     3374514 :     if (prstate->cutoffs &&
     978     1822300 :         TransactionIdIsValid(prstate->cutoffs->OldestXmin) &&
     979     1822300 :         NormalTransactionIdPrecedes(dead_after, prstate->cutoffs->OldestXmin))
     980     1247702 :         return HEAPTUPLE_DEAD;
     981             : 
     982             :     /*
     983             :      * Determine whether or not the tuple is considered dead when compared
     984             :      * with the provided GlobalVisState. On-access pruning does not provide
     985             :      * VacuumCutoffs. And for vacuum, even if the tuple's xmax is not older
     986             :      * than OldestXmin, GlobalVisTestIsRemovableXid() could find the row dead
     987             :      * if the GlobalVisState has been updated since the beginning of vacuuming
     988             :      * the relation.
     989             :      */
     990     2126812 :     if (GlobalVisTestIsRemovableXid(prstate->vistest, dead_after))
     991     1497024 :         return HEAPTUPLE_DEAD;
     992             : 
     993      629788 :     return res;
     994             : }
     995             : 
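
heap_prune_satisfies_vacuum() applies two removability tests in order: first
the fixed OldestXmin cutoff that VACUUM computed up front, then the
GlobalVisState horizon, which may have advanced since vacuuming began.  A
standalone sketch of that ordering (a simplified model; the fixed value inside
vistest_is_removable() is a hypothetical stand-in for
GlobalVisTestIsRemovableXid()):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t TransactionId;

    /* stand-in for GlobalVisTestIsRemovableXid(); in the backend this
     * horizon can keep advancing while VACUUM runs */
    static int
    vistest_is_removable(TransactionId dead_after)
    {
        TransactionId horizon = 1200;   /* hypothetical current horizon */

        return (int32_t) (dead_after - horizon) < 0;
    }

    int
    main(void)
    {
        TransactionId oldest_xmin = 1000;   /* VACUUM's fixed cutoff */
        TransactionId dead_after = 1100;    /* xmax of a RECENTLY_DEAD tuple */

        if ((int32_t) (dead_after - oldest_xmin) < 0)
            printf("DEAD: older than OldestXmin\n");
        else if (vistest_is_removable(dead_after))
            printf("DEAD: GlobalVisState horizon advanced past it\n");
        else
            printf("RECENTLY_DEAD: keep for now\n");
        return 0;
    }
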
     996             : 
     997             : /*
     998             :  * Pruning calculates tuple visibility once and saves the results in an array
     999             :  * of int8.  See PruneState.htsv for details.  This helper function is meant
    1000             :  * to guard against examining visibility status array members which have not
    1001             :  * yet been computed.
    1002             :  */
    1003             : static inline HTSV_Result
    1004    55704940 : htsv_get_valid_status(int status)
    1005             : {
    1006             :     Assert(status >= HEAPTUPLE_DEAD &&
    1007             :            status <= HEAPTUPLE_DELETE_IN_PROGRESS);
    1008    55704940 :     return (HTSV_Result) status;
    1009             : }
    1010             : 
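
The guard matters because the visibility array starts out filled with -1,
meaning "not yet computed" (see the PruneState setup earlier in this file);
reading such a slot through this helper trips the assertion instead of
silently misclassifying a tuple.  A small standalone illustration, assuming
the enum mirrors HTSV_Result's declaration order:

    #include <assert.h>
    #include <stdint.h>

    /* same declaration order as HTSV_Result */
    enum { HEAPTUPLE_DEAD, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD,
           HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_DELETE_IN_PROGRESS };

    int
    main(void)
    {
        int8_t  htsv[16];

        for (int i = 0; i < 16; i++)
            htsv[i] = -1;           /* -1 == "not yet computed" */
        htsv[3] = HEAPTUPLE_LIVE;   /* computed during the first pass */

        /* the guard: an uncomputed slot (-1) would fail this assertion */
        assert(htsv[3] >= HEAPTUPLE_DEAD &&
               htsv[3] <= HEAPTUPLE_DELETE_IN_PROGRESS);
        return 0;
    }
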
    1011             : /*
    1012             :  * Prune the specified line pointer or a HOT chain originating at that line pointer.
    1013             :  *
    1014             :  * Tuple visibility information is provided in prstate->htsv.
    1015             :  *
    1016             :  * If the item is an index-referenced tuple (i.e. not a heap-only tuple),
    1017             :  * the HOT chain is pruned by removing all DEAD tuples at the start of the HOT
    1018             :  * chain.  We also prune any RECENTLY_DEAD tuples preceding a DEAD tuple.
    1019             :  * This is OK because a RECENTLY_DEAD tuple preceding a DEAD tuple is really
    1020             :  * DEAD, our visibility test is just too coarse to detect it.
    1021             :  *
    1022             :  * Pruning must never leave behind a DEAD tuple that still has tuple storage.
    1023             :  * VACUUM isn't prepared to deal with that case.
    1024             :  *
    1025             :  * The root line pointer is redirected to the tuple immediately after the
    1026             :  * latest DEAD tuple.  If all tuples in the chain are DEAD, the root line
    1027             :  * pointer is marked LP_DEAD.  (This includes the case of a DEAD simple
    1028             :  * tuple, which we treat as a chain of length 1.)
    1029             :  *
    1030             :  * We don't actually change the page here. We just add entries to the arrays in
    1031             :  * prstate showing the changes to be made.  Items to be redirected are added
    1032             :  * to the redirected[] array (two entries per redirection); items to be set to
    1033             :  * LP_DEAD state are added to nowdead[]; and items to be set to LP_UNUSED
    1034             :  * state are added to nowunused[].  We perform bookkeeping of live tuples,
    1035             :  * visibility etc. based on what the page will look like after the changes
    1036             :  * are applied.  All that bookkeeping is performed in the heap_prune_record_*()
    1037             :  * subroutines.  The division of labor is that heap_prune_chain() decides the
    1038             :  * fate of each tuple, i.e. whether it's going to be removed, redirected or
    1039             :  * left unchanged, and the heap_prune_record_*() subroutines update PruneState
    1040             :  * based on that outcome.
    1041             :  */
    1042             : static void
    1043    55495086 : heap_prune_chain(Page page, BlockNumber blockno, OffsetNumber maxoff,
    1044             :                  OffsetNumber rootoffnum, PruneState *prstate)
    1045             : {
    1046    55495086 :     TransactionId priorXmax = InvalidTransactionId;
    1047             :     ItemId      rootlp;
    1048             :     OffsetNumber offnum;
    1049             :     OffsetNumber chainitems[MaxHeapTuplesPerPage];
    1050             : 
    1051             :     /*
    1052             :      * After traversing the HOT chain, ndeadchain is the index in chainitems
    1053             :      * of the first live successor after the last dead item.
    1054             :      */
    1055    55495086 :     int         ndeadchain = 0,
    1056    55495086 :                 nchain = 0;
    1057             : 
    1058    55495086 :     rootlp = PageGetItemId(page, rootoffnum);
    1059             : 
    1060             :     /* Start from the root tuple */
    1061    55495086 :     offnum = rootoffnum;
    1062             : 
    1063             :     /* while not end of the chain */
    1064             :     for (;;)
    1065      811412 :     {
    1066             :         HeapTupleHeader htup;
    1067             :         ItemId      lp;
    1068             : 
    1069             :         /* Sanity check (pure paranoia) */
    1070    56306498 :         if (offnum < FirstOffsetNumber)
    1071           0 :             break;
    1072             : 
    1073             :         /*
    1074             :          * An offset past the end of the page's line pointer array is possible
    1075             :          * when the array was truncated (original item must have been unused)
    1076             :          */
    1077    56306498 :         if (offnum > maxoff)
    1078           0 :             break;
    1079             : 
    1080             :         /* If the item is already processed, stop --- it must not be the same chain */
    1081    56306498 :         if (prstate->processed[offnum])
    1082           0 :             break;
    1083             : 
    1084    56306498 :         lp = PageGetItemId(page, offnum);
    1085             : 
    1086             :         /*
    1087             :          * An unused item obviously isn't part of the chain. Likewise, a dead
    1088             :          * line pointer can't be part of the chain.  Both of those cases were
    1089             :          * already marked as processed.
    1090             :          */
    1091             :         Assert(ItemIdIsUsed(lp));
    1092             :         Assert(!ItemIdIsDead(lp));
    1093             : 
    1094             :         /*
    1095             :          * If we are looking at the redirected root line pointer, jump to the
    1096             :          * first normal tuple in the chain.  If we find a redirect somewhere
    1097             :          * else, stop --- it must not be the same chain.
    1098             :          */
    1099    56306498 :         if (ItemIdIsRedirected(lp))
    1100             :         {
    1101      601558 :             if (nchain > 0)
    1102           0 :                 break;          /* not at start of chain */
    1103      601558 :             chainitems[nchain++] = offnum;
    1104      601558 :             offnum = ItemIdGetRedirect(rootlp);
    1105      601558 :             continue;
    1106             :         }
    1107             : 
    1108             :         Assert(ItemIdIsNormal(lp));
    1109             : 
    1110    55704940 :         htup = (HeapTupleHeader) PageGetItem(page, lp);
    1111             : 
    1112             :         /*
    1113             :          * Check the tuple XMIN against prior XMAX, if any
    1114             :          */
    1115    55914794 :         if (TransactionIdIsValid(priorXmax) &&
    1116      209854 :             !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
    1117           0 :             break;
    1118             : 
    1119             :         /*
    1120             :          * OK, this tuple is indeed a member of the chain.
    1121             :          */
    1122    55704940 :         chainitems[nchain++] = offnum;
    1123             : 
    1124    55704940 :         switch (htsv_get_valid_status(prstate->htsv[offnum]))
    1125             :         {
    1126     2826064 :             case HEAPTUPLE_DEAD:
    1127             : 
    1128             :                 /* Remember the last DEAD tuple seen */
    1129     2826064 :                 ndeadchain = nchain;
    1130     2826064 :                 HeapTupleHeaderAdvanceConflictHorizon(htup,
    1131             :                                                       &prstate->latest_xid_removed);
    1132             :                 /* Advance to next chain member */
    1133     2826064 :                 break;
    1134             : 
    1135      629788 :             case HEAPTUPLE_RECENTLY_DEAD:
    1136             : 
    1137             :                 /*
    1138             :                  * We don't need to advance the conflict horizon for
    1139             :                  * RECENTLY_DEAD tuples, even if we are removing them.  This
    1140             :                  * is because we only remove RECENTLY_DEAD tuples if they
    1141             :                  * precede a DEAD tuple, and the DEAD tuple must have been
    1142             :                  * inserted by a newer transaction than the RECENTLY_DEAD
    1143             :                  * tuple by virtue of being later in the chain.  We will have
    1144             :                  * advanced the conflict horizon for the DEAD tuple.
    1145             :                  */
    1146             : 
    1147             :                 /*
    1148             :                  * Advance past RECENTLY_DEAD tuples just in case there's a
    1149             :                  * DEAD one after them.  We have to make sure that we don't
    1150             :                  * miss any DEAD tuples, since DEAD tuples that still have
    1151             :                  * tuple storage after pruning will confuse VACUUM.
    1152             :                  */
    1153      629788 :                 break;
    1154             : 
    1155    52249088 :             case HEAPTUPLE_DELETE_IN_PROGRESS:
    1156             :             case HEAPTUPLE_LIVE:
    1157             :             case HEAPTUPLE_INSERT_IN_PROGRESS:
    1158    52249088 :                 goto process_chain;
    1159             : 
    1160           0 :             default:
    1161           0 :                 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
    1162             :                 goto process_chain;
    1163             :         }
    1164             : 
    1165             :         /*
    1166             :          * If the tuple is not HOT-updated, then we are at the end of this
    1167             :          * HOT-update chain.
    1168             :          */
    1169     3455852 :         if (!HeapTupleHeaderIsHotUpdated(htup))
    1170     3245998 :             goto process_chain;
    1171             : 
    1172             :         /* HOT implies it can't have moved to different partition */
    1173             :         Assert(!HeapTupleHeaderIndicatesMovedPartitions(htup));
    1174             : 
    1175             :         /*
    1176             :          * Advance to next chain member.
    1177             :          */
    1178             :         Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blockno);
    1179      209854 :         offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
    1180      209854 :         priorXmax = HeapTupleHeaderGetUpdateXid(htup);
    1181             :     }
    1182             : 
    1183           0 :     if (ItemIdIsRedirected(rootlp) && nchain < 2)
    1184             :     {
    1185             :         /*
    1186             :          * We found a redirect item that doesn't point to a valid follow-on
    1187             :          * item.  This can happen if the loop in heap_page_prune_and_freeze()
    1188             :          * caused us to visit the dead successor of a redirect item before
    1189             :          * visiting the redirect item.  We can clean up by setting the
    1190             :          * redirect item to LP_DEAD state or LP_UNUSED if the caller
    1191             :          * indicated.
    1192             :          */
    1193           0 :         heap_prune_record_dead_or_unused(prstate, rootoffnum, false);
    1194           0 :         return;
    1195             :     }
    1196             : 
    1197           0 : process_chain:
    1198             : 
    1199    55495086 :     if (ndeadchain == 0)
    1200             :     {
    1201             :         /*
    1202             :          * No DEAD tuple was found, so the chain is entirely composed of
    1203             :          * normal, unchanged tuples.  Leave it alone.
    1204             :          */
    1205    52736548 :         int         i = 0;
    1206             : 
    1207    52736548 :         if (ItemIdIsRedirected(rootlp))
    1208             :         {
    1209      568350 :             heap_prune_record_unchanged_lp_redirect(prstate, rootoffnum);
    1210      568350 :             i++;
    1211             :         }
    1212   105482308 :         for (; i < nchain; i++)
    1213    52745760 :             heap_prune_record_unchanged_lp_normal(page, prstate, chainitems[i]);
    1214             :     }
    1215     2758538 :     else if (ndeadchain == nchain)
    1216             :     {
    1217             :         /*
    1218             :          * The entire chain is dead.  Mark the root line pointer LP_DEAD, and
    1219             :          * fully remove the other tuples in the chain.
    1220             :          */
    1221     2629090 :         heap_prune_record_dead_or_unused(prstate, rootoffnum, ItemIdIsNormal(rootlp));
    1222     2692438 :         for (int i = 1; i < nchain; i++)
    1223       63348 :             heap_prune_record_unused(prstate, chainitems[i], true);
    1224             :     }
    1225             :     else
    1226             :     {
    1227             :         /*
    1228             :          * We found a DEAD tuple in the chain.  Redirect the root line pointer
    1229             :          * to the first non-DEAD tuple, and mark as unused each intermediate
    1230             :          * item that we are able to remove from the chain.
    1231             :          */
    1232      129448 :         heap_prune_record_redirect(prstate, rootoffnum, chainitems[ndeadchain],
    1233      129448 :                                    ItemIdIsNormal(rootlp));
    1234      166834 :         for (int i = 1; i < ndeadchain; i++)
    1235       37386 :             heap_prune_record_unused(prstate, chainitems[i], true);
    1236             : 
    1237             :         /* the rest of tuples in the chain are normal, unchanged tuples */
    1238      262564 :         for (int i = ndeadchain; i < nchain; i++)
    1239      133116 :             heap_prune_record_unchanged_lp_normal(page, prstate, chainitems[i]);
    1240             :     }
    1241             : }
    1242             : 
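
The chain walk above boils down to computing ndeadchain, the count of leading
chain members that may be treated as dead, and then dispatching on three
cases.  A compact standalone model of that classification (hypothetical
statuses; a RECENTLY_DEAD member counts as dead only when a DEAD member
follows it later in the chain):

    #include <stdio.h>

    enum { DEAD, RECENTLY_DEAD, LIVE };

    int
    main(void)
    {
        /* statuses of chain members, root first (hypothetical example) */
        int     chain[] = { DEAD, RECENTLY_DEAD, DEAD, LIVE };
        int     nchain = 4;
        int     ndeadchain = 0;

        for (int i = 0; i < nchain; i++)
        {
            if (chain[i] == DEAD)
                ndeadchain = i + 1;     /* just past the last DEAD member */
            else if (chain[i] == LIVE)
                break;                  /* a live member ends the scan */
            /* RECENTLY_DEAD: keep walking in case a DEAD member follows */
        }

        if (ndeadchain == 0)
            printf("leave the chain alone\n");
        else if (ndeadchain == nchain)
            printf("root becomes LP_DEAD; members 1..%d LP_UNUSED\n",
                   nchain - 1);
        else
            printf("redirect root to chainitems[%d]; members 1..%d LP_UNUSED\n",
                   ndeadchain, ndeadchain - 1);
        return 0;
    }
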
    1243             : /* Record lowest soon-prunable XID */
    1244             : static void
    1245    18626770 : heap_prune_record_prunable(PruneState *prstate, TransactionId xid)
    1246             : {
    1247             :     /*
    1248             :      * This should exactly match the PageSetPrunable macro.  We can't store
    1249             :      * directly into the page header yet, so we update working state.
    1250             :      */
    1251             :     Assert(TransactionIdIsNormal(xid));
    1252    36518430 :     if (!TransactionIdIsValid(prstate->new_prune_xid) ||
    1253    17891660 :         TransactionIdPrecedes(xid, prstate->new_prune_xid))
    1254      737564 :         prstate->new_prune_xid = xid;
    1255    18626770 : }
    1256             : 
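
The hint bookkeeping above is simply "remember the oldest xid seen so far";
heap_page_prune_and_freeze() later stores it into the page header, matching
what the PageSetPrunable macro would do.  A standalone sketch of the same
min-tracking (xid_precedes() approximates TransactionIdPrecedes() for normal
xids):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t TransactionId;
    #define InvalidTransactionId ((TransactionId) 0)

    static int
    xid_precedes(TransactionId id1, TransactionId id2)
    {
        return (int32_t) (id1 - id2) < 0;
    }

    static void
    record_prunable(TransactionId *new_prune_xid, TransactionId xid)
    {
        if (*new_prune_xid == InvalidTransactionId ||
            xid_precedes(xid, *new_prune_xid))
            *new_prune_xid = xid;
    }

    int
    main(void)
    {
        TransactionId new_prune_xid = InvalidTransactionId;

        record_prunable(&new_prune_xid, 500);
        record_prunable(&new_prune_xid, 450);   /* older: replaces 500 */
        record_prunable(&new_prune_xid, 600);   /* newer: ignored */

        printf("prune hint = %u\n", (unsigned) new_prune_xid);  /* 450 */
        return 0;
    }
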
    1257             : /* Record line pointer to be redirected */
    1258             : static void
    1259      129448 : heap_prune_record_redirect(PruneState *prstate,
    1260             :                            OffsetNumber offnum, OffsetNumber rdoffnum,
    1261             :                            bool was_normal)
    1262             : {
    1263             :     Assert(!prstate->processed[offnum]);
    1264      129448 :     prstate->processed[offnum] = true;
    1265             : 
    1266             :     /*
    1267             :      * Do not mark the redirect target here.  It needs to be counted
    1268             :      * separately as an unchanged tuple.
    1269             :      */
    1270             : 
    1271             :     Assert(prstate->nredirected < MaxHeapTuplesPerPage);
    1272      129448 :     prstate->redirected[prstate->nredirected * 2] = offnum;
    1273      129448 :     prstate->redirected[prstate->nredirected * 2 + 1] = rdoffnum;
    1274             : 
    1275      129448 :     prstate->nredirected++;
    1276             : 
    1277             :     /*
    1278             :      * If the root entry had been a normal tuple, we are deleting it, so count
    1279             :      * it in the result.  But changing a redirect (even to DEAD state) doesn't
    1280             :      * count.
    1281             :      */
    1282      129448 :     if (was_normal)
    1283      114276 :         prstate->ndeleted++;
    1284             : 
    1285      129448 :     prstate->hastup = true;
    1286      129448 : }
    1287             : 
    1288             : /* Record line pointer to be marked dead */
    1289             : static void
    1290     2558954 : heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum,
    1291             :                        bool was_normal)
    1292             : {
    1293             :     Assert(!prstate->processed[offnum]);
    1294     2558954 :     prstate->processed[offnum] = true;
    1295             : 
    1296             :     Assert(prstate->ndead < MaxHeapTuplesPerPage);
    1297     2558954 :     prstate->nowdead[prstate->ndead] = offnum;
    1298     2558954 :     prstate->ndead++;
    1299             : 
    1300             :     /*
    1301             :      * Deliberately delay unsetting all_visible until later during pruning.
    1302             :      * Removable dead tuples shouldn't preclude freezing the page.
    1303             :      */
    1304             : 
    1305             :     /* Record the dead offset for vacuum */
    1306     2558954 :     prstate->deadoffsets[prstate->lpdead_items++] = offnum;
    1307             : 
    1308             :     /*
    1309             :      * If the root entry had been a normal tuple, we are deleting it, so count
    1310             :      * it in the result.  But changing a redirect (even to DEAD state) doesn't
    1311             :      * count.
    1312             :      */
    1313     2558954 :     if (was_normal)
    1314     2540918 :         prstate->ndeleted++;
    1315     2558954 : }
    1316             : 
    1317             : /*
    1318             :  * Record that a line pointer should be marked LP_DEAD or LP_UNUSED, depending
    1319             :  * on whether the caller set mark_unused_now to true.  There are other cases
    1320             :  * in which we mark line pointers LP_UNUSED, but we never mark a line pointer
    1321             :  * LP_DEAD when mark_unused_now is true.
    1322             :  */
    1323             : static void
    1324     2629090 : heap_prune_record_dead_or_unused(PruneState *prstate, OffsetNumber offnum,
    1325             :                                  bool was_normal)
    1326             : {
    1327             :     /*
    1328             :      * If the caller set mark_unused_now to true, we can remove dead tuples
    1329             :      * during pruning instead of marking their line pointers dead. Set this
    1330             :      * tuple's line pointer LP_UNUSED. We hint that this option is less
    1331             :      * likely.
    1332             :      */
    1333     2629090 :     if (unlikely(prstate->mark_unused_now))
    1334       70136 :         heap_prune_record_unused(prstate, offnum, was_normal);
    1335             :     else
    1336     2558954 :         heap_prune_record_dead(prstate, offnum, was_normal);
    1337     2629090 : }
    1338             : 
    1339             : /* Record line pointer to be marked unused */
    1340             : static void
    1341      174848 : heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum, bool was_normal)
    1342             : {
    1343             :     Assert(!prstate->processed[offnum]);
    1344      174848 :     prstate->processed[offnum] = true;
    1345             : 
    1346             :     Assert(prstate->nunused < MaxHeapTuplesPerPage);
    1347      174848 :     prstate->nowunused[prstate->nunused] = offnum;
    1348      174848 :     prstate->nunused++;
    1349             : 
    1350             :     /*
    1351             :      * If the root entry had been a normal tuple, we are deleting it, so count
    1352             :      * it in the result.  But changing a redirect (even to DEAD state) doesn't
    1353             :      * count.
    1354             :      */
    1355      174848 :     if (was_normal)
    1356      174848 :         prstate->ndeleted++;
    1357      174848 : }
    1358             : 
    1359             : /*
    1360             :  * Record an unused line pointer that is left unchanged.
    1361             :  */
    1362             : static void
    1363      575018 : heap_prune_record_unchanged_lp_unused(Page page, PruneState *prstate, OffsetNumber offnum)
    1364             : {
    1365             :     Assert(!prstate->processed[offnum]);
    1366      575018 :     prstate->processed[offnum] = true;
    1367      575018 : }
    1368             : 
    1369             : /*
    1370             :  * Record line pointer that is left unchanged.  We consider freezing it, and
    1371             :  * update bookkeeping of tuple counts and page visibility.
    1372             :  */
    1373             : static void
    1374    52901900 : heap_prune_record_unchanged_lp_normal(Page page, PruneState *prstate, OffsetNumber offnum)
    1375             : {
    1376             :     HeapTupleHeader htup;
    1377             : 
    1378             :     Assert(!prstate->processed[offnum]);
    1379    52901900 :     prstate->processed[offnum] = true;
    1380             : 
    1381    52901900 :     prstate->hastup = true;      /* the page is not empty */
    1382             : 
    1383             :     /*
    1384             :      * The criteria for counting a tuple as live in this block need to match
    1385             :      * what analyze.c's acquire_sample_rows() does, otherwise VACUUM and
    1386             :      * ANALYZE may produce wildly different reltuples values, e.g. when there
    1387             :      * are many recently-dead tuples.
    1388             :      *
    1389             :      * The logic here is a bit simpler than acquire_sample_rows(), as VACUUM
    1390             :      * can't run inside a transaction block, which makes some cases impossible
    1391             :      * (e.g. in-progress insert from the same transaction).
    1392             :      *
    1393             :      * HEAPTUPLE_DEAD are handled by the other heap_prune_record_*()
    1394             :      * subroutines.  They don't count dead items like acquire_sample_rows()
    1395             :      * does, because we assume that all dead items will become LP_UNUSED
    1396             :      * before VACUUM finishes.  This difference is only superficial.  VACUUM
    1397             :      * effectively agrees with ANALYZE about DEAD items, in the end.  VACUUM
    1398             :      * won't remember LP_DEAD items, but only because they're not supposed to
    1399             :      * be left behind when it is done. (Cases where we bypass index vacuuming
    1400             :      * will violate this optimistic assumption, but the overall impact of that
    1401             :      * should be negligible.)
    1402             :      */
    1403    52901900 :     htup = (HeapTupleHeader) PageGetItem(page, PageGetItemId(page, offnum));
    1404             : 
    1405    52901900 :     switch (prstate->htsv[offnum])
    1406             :     {
    1407    34149832 :         case HEAPTUPLE_LIVE:
    1408             : 
    1409             :             /*
    1410             :              * Count it as live.  Not only is this natural, but it's also what
    1411             :              * acquire_sample_rows() does.
    1412             :              */
    1413    34149832 :             prstate->live_tuples++;
    1414             : 
    1415             :             /*
    1416             :              * Is the tuple definitely visible to all transactions?
    1417             :              *
    1418             :              * NB: Like with per-tuple hint bits, we can't set the
    1419             :              * PD_ALL_VISIBLE flag if the inserter committed asynchronously.
    1420             :              * See SetHintBits for more info.  Check that the tuple is hinted
    1421             :              * xmin-committed because of that.
    1422             :              */
    1423    34149832 :             if (prstate->all_visible)
    1424             :             {
    1425             :                 TransactionId xmin;
    1426             : 
    1427    23721290 :                 if (!HeapTupleHeaderXminCommitted(htup))
    1428             :                 {
    1429         480 :                     prstate->all_visible = false;
    1430         480 :                     break;
    1431             :                 }
    1432             : 
    1433             :                 /*
    1434             :                  * The inserter definitely committed.  But is it old enough
    1435             :                  * that everyone sees it as committed?  A FrozenTransactionId
    1436             :                  * is seen as committed to everyone.  Otherwise, we check if
    1437             :                  * there is a snapshot that considers this xid to still be
    1438             :                  * running, and if so, we don't consider the page all-visible.
    1439             :                  */
    1440    23720810 :                 xmin = HeapTupleHeaderGetXmin(htup);
    1441             : 
    1442             :                 /*
    1443             :                  * For now always use prstate->cutoffs for this test, because
    1444             :                  * we only update 'all_visible' when freezing is requested. We
    1445             :                  * could use GlobalVisTestIsRemovableXid instead, if a
    1446             :                  * non-freezing caller wanted to set the VM bit.
    1447             :                  */
    1448             :                 Assert(prstate->cutoffs);
    1449    23720810 :                 if (!TransactionIdPrecedes(xmin, prstate->cutoffs->OldestXmin))
    1450             :                 {
    1451        6634 :                     prstate->all_visible = false;
    1452        6634 :                     break;
    1453             :                 }
    1454             : 
    1455             :                 /* Track newest xmin on page. */
    1456    23714176 :                 if (TransactionIdFollows(xmin, prstate->visibility_cutoff_xid) &&
    1457             :                     TransactionIdIsNormal(xmin))
    1458      210088 :                     prstate->visibility_cutoff_xid = xmin;
    1459             :             }
    1460    34142718 :             break;
    1461             : 
    1462      629788 :         case HEAPTUPLE_RECENTLY_DEAD:
    1463      629788 :             prstate->recently_dead_tuples++;
    1464      629788 :             prstate->all_visible = false;
    1465             : 
    1466             :             /*
    1467             :              * This tuple will soon become DEAD.  Update the hint field so
    1468             :              * that the page is reconsidered for pruning in future.
    1469             :              */
    1470      629788 :             heap_prune_record_prunable(prstate,
    1471             :                                        HeapTupleHeaderGetUpdateXid(htup));
    1472      629788 :             break;
    1473             : 
    1474      125298 :         case HEAPTUPLE_INSERT_IN_PROGRESS:
    1475             : 
    1476             :             /*
    1477             :              * We do not count these rows as live, because we expect the
    1478             :              * inserting transaction to update the counters at commit, and we
    1479             :              * assume that will happen only after we report our results.  This
    1480             :              * assumption is a bit shaky, but it is what acquire_sample_rows()
    1481             :              * does, so be consistent.
    1482             :              */
    1483      125298 :             prstate->all_visible = false;
    1484             : 
    1485             :             /*
    1486             :              * If we wanted to optimize for aborts, we might consider marking
    1487             :              * the page prunable when we see INSERT_IN_PROGRESS.  But we
    1488             :              * don't.  See related decisions about when to mark the page
    1489             :              * prunable in heapam.c.
    1490             :              */
    1491      125298 :             break;
    1492             : 
    1493    17996982 :         case HEAPTUPLE_DELETE_IN_PROGRESS:
    1494             : 
    1495             :             /*
    1496             :              * This is an expected case during concurrent vacuum.  Count such
    1497             :              * rows as live.  As above, we assume the deleting transaction
    1498             :              * will commit and update the counters after we report.
    1499             :              */
    1500    17996982 :             prstate->live_tuples++;
    1501    17996982 :             prstate->all_visible = false;
    1502             : 
    1503             :             /*
    1504             :              * This tuple may soon become DEAD.  Update the hint field so that
    1505             :              * the page is reconsidered for pruning in future.
    1506             :              */
    1507    17996982 :             heap_prune_record_prunable(prstate,
    1508             :                                        HeapTupleHeaderGetUpdateXid(htup));
    1509    17996982 :             break;
    1510             : 
    1511           0 :         default:
    1512             : 
    1513             :             /*
    1514             :              * DEAD tuples should've been passed to heap_prune_record_dead()
    1515             :              * or heap_prune_record_unused() instead.
    1516             :              */
    1517           0 :             elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result %d",
    1518             :                  prstate->htsv[offnum]);
    1519             :             break;
    1520             :     }
    1521             : 
    1522             :     /* Consider freezing any normal tuples which will not be removed */
    1523    52901900 :     if (prstate->attempt_freeze)
    1524             :     {
    1525             :         bool        totally_frozen;
    1526             : 
    1527    49946614 :         if ((heap_prepare_freeze_tuple(htup,
    1528    49946614 :                                        prstate->cutoffs,
    1529             :                                        &prstate->pagefrz,
    1530    49946614 :                                        &prstate->frozen[prstate->nfrozen],
    1531             :                                        &totally_frozen)))
    1532             :         {
    1533             :             /* Save prepared freeze plan for later */
    1534     3993426 :             prstate->frozen[prstate->nfrozen++].offset = offnum;
    1535             :         }
    1536             : 
    1537             :         /*
    1538             :          * If any tuple isn't either totally frozen already or eligible to
    1539             :          * become totally frozen (according to its freeze plan), then the page
    1540             :          * definitely cannot be set all-frozen in the visibility map later on.
    1541             :          */
    1542    49946614 :         if (!totally_frozen)
    1543    19278750 :             prstate->all_frozen = false;
    1544             :     }
    1545    52901900 : }
    1546             : 
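
For LIVE tuples, the bookkeeping above produces two things: whether every
committed xmin on the page precedes OldestXmin (all_visible), and the newest
such xmin, which later serves as the VM conflict horizon.  A standalone model
of that tracking (hypothetical xmin values; comparisons simplified to normal
xids, with the hint-bit and TransactionIdIsNormal checks of the real code
omitted):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t TransactionId;

    int
    main(void)
    {
        TransactionId oldest_xmin = 2000;               /* VACUUM's cutoff */
        TransactionId xmins[] = { 1500, 1750, 1600 };   /* committed xmins */
        TransactionId visibility_cutoff_xid = 0;
        int         all_visible = 1;

        for (int i = 0; i < 3; i++)
        {
            if (!((int32_t) (xmins[i] - oldest_xmin) < 0))
            {
                all_visible = 0;        /* someone may not see this xmin */
                break;
            }
            if ((int32_t) (xmins[i] - visibility_cutoff_xid) > 0)
                visibility_cutoff_xid = xmins[i];   /* track newest xmin */
        }

        printf("all_visible=%d cutoff=%u\n",
               all_visible, (unsigned) visibility_cutoff_xid);  /* 1, 1750 */
        return 0;
    }
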
    1547             : 
    1548             : /*
    1549             :  * Record line pointer that was already LP_DEAD and is left unchanged.
    1550             :  */
    1551             : static void
    1552     1908034 : heap_prune_record_unchanged_lp_dead(Page page, PruneState *prstate, OffsetNumber offnum)
    1553             : {
    1554             :     Assert(!prstate->processed[offnum]);
    1555     1908034 :     prstate->processed[offnum] = true;
    1556             : 
    1557             :     /*
    1558             :      * Deliberately don't set hastup for LP_DEAD items.  We make the soft
    1559             :      * assumption that any LP_DEAD items encountered here will become
    1560             :      * LP_UNUSED later on, before count_nondeletable_pages is reached.  If we
    1561             :      * don't make this assumption then rel truncation will only happen every
    1562             :      * other VACUUM, at most.  Besides, VACUUM must treat
    1563             :      * hastup/nonempty_pages as provisional no matter how LP_DEAD items are
    1564             :      * handled (handled here, or handled later on).
    1565             :      *
    1566             :      * Similarly, don't unset all_visible until later, at the end of
    1567             :      * heap_page_prune_and_freeze().  This will allow us to attempt to freeze
    1568             :      * the page after pruning.  As long as we unset it before updating the
    1569             :      * visibility map, this will be correct.
    1570             :      */
    1571             : 
    1572             :     /* Record the dead offset for vacuum */
    1573     1908034 :     prstate->deadoffsets[prstate->lpdead_items++] = offnum;
    1574     1908034 : }
    1575             : 
    1576             : /*
    1577             :  * Record LP_REDIRECT that is left unchanged.
    1578             :  */
    1579             : static void
    1580      568350 : heap_prune_record_unchanged_lp_redirect(PruneState *prstate, OffsetNumber offnum)
    1581             : {
    1582             :     /*
    1583             :      * A redirect line pointer doesn't count as a live tuple.
    1584             :      *
    1585             :      * If we leave a redirect line pointer in place, there will be another
    1586             :      * tuple on the page that it points to.  We will do the bookkeeping for
    1587             :      * that separately.  So we have nothing to do here, except remember that
    1588             :      * we processed this item.
    1589             :      */
    1590             :     Assert(!prstate->processed[offnum]);
    1591      568350 :     prstate->processed[offnum] = true;
    1592      568350 : }
    1593             : 
    1594             : /*
    1595             :  * Perform the actual page changes needed by heap_page_prune_and_freeze().
    1596             :  *
    1597             :  * If 'lp_truncate_only' is set, we are merely marking LP_DEAD line pointers
    1598             :  * as unused, not redirecting or removing anything else.  The
    1599             :  * PageRepairFragmentation() call is skipped in that case.
    1600             :  *
    1601             :  * If 'lp_truncate_only' is not set, the caller must hold a cleanup lock on
    1602             :  * the buffer.  If it is set, an ordinary exclusive lock suffices.
    1603             :  */
    1604             : void
    1605      123784 : heap_page_prune_execute(Buffer buffer, bool lp_truncate_only,
    1606             :                         OffsetNumber *redirected, int nredirected,
    1607             :                         OffsetNumber *nowdead, int ndead,
    1608             :                         OffsetNumber *nowunused, int nunused)
    1609             : {
    1610      123784 :     Page        page = BufferGetPage(buffer);
    1611             :     OffsetNumber *offnum;
    1612             :     HeapTupleHeader htup PG_USED_FOR_ASSERTS_ONLY;
    1613             : 
    1614             :     /* Shouldn't be called unless there's something to do */
    1615             :     Assert(nredirected > 0 || ndead > 0 || nunused > 0);
    1616             : 
    1617             :     /* If 'lp_truncate_only', we can only remove already-dead line pointers */
    1618             :     Assert(!lp_truncate_only || (nredirected == 0 && ndead == 0));
    1619             : 
    1620             :     /* Update all redirected line pointers */
    1621      123784 :     offnum = redirected;
    1622      290530 :     for (int i = 0; i < nredirected; i++)
    1623             :     {
    1624      166746 :         OffsetNumber fromoff = *offnum++;
    1625      166746 :         OffsetNumber tooff = *offnum++;
    1626      166746 :         ItemId      fromlp = PageGetItemId(page, fromoff);
    1627             :         ItemId      tolp PG_USED_FOR_ASSERTS_ONLY;
    1628             : 
    1629             : #ifdef USE_ASSERT_CHECKING
    1630             : 
    1631             :         /*
    1632             :          * Any existing item that we set as an LP_REDIRECT (any 'from' item)
    1633             :          * must be the first item from a HOT chain.  If the item has tuple
    1634             :          * storage then it can't be a heap-only tuple.  Otherwise we are just
    1635             :          * maintaining an existing LP_REDIRECT from an existing HOT chain that
    1636             :          * has been pruned at least once before now.
    1637             :          */
    1638             :         if (!ItemIdIsRedirected(fromlp))
    1639             :         {
    1640             :             Assert(ItemIdHasStorage(fromlp) && ItemIdIsNormal(fromlp));
    1641             : 
    1642             :             htup = (HeapTupleHeader) PageGetItem(page, fromlp);
    1643             :             Assert(!HeapTupleHeaderIsHeapOnly(htup));
    1644             :         }
    1645             :         else
    1646             :         {
    1647             :             /* We shouldn't need to redundantly set the redirect */
    1648             :             Assert(ItemIdGetRedirect(fromlp) != tooff);
    1649             :         }
    1650             : 
    1651             :         /*
    1652             :          * The item that we're about to set as an LP_REDIRECT (the 'from'
    1653             :          * item) will point to an existing item (the 'to' item) that is
    1654             :          * already a heap-only tuple.  There can be at most one LP_REDIRECT
    1655             :          * item per HOT chain.
    1656             :          *
    1657             :          * We need to keep around an LP_REDIRECT item (after the original
    1658             :          * non-heap-only root tuple gets pruned away) so that it's always
    1659             :          * possible for VACUUM to easily figure out what TID to delete from
    1660             :          * indexes when an entire HOT chain becomes dead.  A heap-only tuple
    1661             :          * can never become LP_DEAD; an LP_REDIRECT item or a regular heap
    1662             :          * tuple can.
    1663             :          *
    1664             :          * This check may miss problems, e.g. the target of a redirect could
    1665             :          * be marked as unused subsequently. The page_verify_redirects() check
    1666             :          * below will catch such problems.
    1667             :          */
    1668             :         tolp = PageGetItemId(page, tooff);
    1669             :         Assert(ItemIdHasStorage(tolp) && ItemIdIsNormal(tolp));
    1670             :         htup = (HeapTupleHeader) PageGetItem(page, tolp);
    1671             :         Assert(HeapTupleHeaderIsHeapOnly(htup));
    1672             : #endif
    1673             : 
    1674      166746 :         ItemIdSetRedirect(fromlp, tooff);
    1675             :     }
    1676             : 
    1677             :     /* Update all now-dead line pointers */
    1678      123784 :     offnum = nowdead;
    1679     3210772 :     for (int i = 0; i < ndead; i++)
    1680             :     {
    1681     3086988 :         OffsetNumber off = *offnum++;
    1682     3086988 :         ItemId      lp = PageGetItemId(page, off);
    1683             : 
    1684             : #ifdef USE_ASSERT_CHECKING
    1685             : 
    1686             :         /*
    1687             :          * An LP_DEAD line pointer must be left behind when the original item
    1688             :          * (which is dead to everybody) could still be referenced by a TID in
    1689             :          * an index.  This should never be necessary with any individual
    1690             :          * heap-only tuple item, though. (It's not clear how much of a problem
    1691             :          * that would be, but there is no reason to allow it.)
    1692             :          */
    1693             :         if (ItemIdHasStorage(lp))
    1694             :         {
    1695             :             Assert(ItemIdIsNormal(lp));
    1696             :             htup = (HeapTupleHeader) PageGetItem(page, lp);
    1697             :             Assert(!HeapTupleHeaderIsHeapOnly(htup));
    1698             :         }
    1699             :         else
    1700             :         {
    1701             :             /* Whole HOT chain becomes dead */
    1702             :             Assert(ItemIdIsRedirected(lp));
    1703             :         }
    1704             : #endif
    1705             : 
    1706     3086988 :         ItemIdSetDead(lp);
    1707             :     }
    1708             : 
    1709             :     /* Update all now-unused line pointers */
    1710      123784 :     offnum = nowunused;
    1711      720734 :     for (int i = 0; i < nunused; i++)
    1712             :     {
    1713      596950 :         OffsetNumber off = *offnum++;
    1714      596950 :         ItemId      lp = PageGetItemId(page, off);
    1715             : 
    1716             : #ifdef USE_ASSERT_CHECKING
    1717             : 
    1718             :         if (lp_truncate_only)
    1719             :         {
    1720             :             /* Setting LP_DEAD to LP_UNUSED in vacuum's second pass */
    1721             :             Assert(ItemIdIsDead(lp) && !ItemIdHasStorage(lp));
    1722             :         }
    1723             :         else
    1724             :         {
    1725             :             /*
    1726             :              * When heap_page_prune_and_freeze() was called, mark_unused_now
    1727             :              * may have been passed as true, which allows would-be LP_DEAD
    1728             :              * items to be made LP_UNUSED instead.  This is only possible if
    1729             :              * the relation has no indexes.  If there are any dead items, then
    1730             :              * mark_unused_now was not true and every item being marked
    1731             :              * LP_UNUSED must refer to a heap-only tuple.
    1732             :              */
    1733             :             if (ndead > 0)
    1734             :             {
    1735             :                 Assert(ItemIdHasStorage(lp) && ItemIdIsNormal(lp));
    1736             :                 htup = (HeapTupleHeader) PageGetItem(page, lp);
    1737             :                 Assert(HeapTupleHeaderIsHeapOnly(htup));
    1738             :             }
    1739             :             else
    1740             :                 Assert(ItemIdIsUsed(lp));
    1741             :         }
    1742             : 
    1743             : #endif
    1744             : 
    1745      596950 :         ItemIdSetUnused(lp);
    1746             :     }
    1747             : 
    1748      123784 :     if (lp_truncate_only)
    1749        4934 :         PageTruncateLinePointerArray(page);
    1750             :     else
    1751             :     {
    1752             :         /*
    1753             :          * Finally, repair any fragmentation, and update the page's hint bit
    1754             :          * about whether it has free pointers.
    1755             :          */
    1756      118850 :         PageRepairFragmentation(page);
    1757             : 
    1758             :         /*
    1759             :          * Now that the page has been modified, assert that redirect items
    1760             :          * still point to valid targets.
    1761             :          */
    1762      118850 :         page_verify_redirects(page);
    1763             :     }
    1764      123784 : }
    1765             : 
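
The three loops in heap_page_prune_execute() simply replay decisions that the
scan phase accumulated in the redirected[] (two entries per redirection),
nowdead[], and nowunused[] arrays.  A toy standalone version of that replay
over a plain array of line-pointer states (illustrative only; the real
function also repairs fragmentation and verifies redirects afterwards):

    #include <stdio.h>

    enum lpstate { LP_UNUSED, LP_NORMAL, LP_REDIRECT, LP_DEAD };

    int
    main(void)
    {
        /* toy "page": 1-based offsets 1..4, all normal to start with */
        enum lpstate lp[5] = { LP_UNUSED, LP_NORMAL, LP_NORMAL, LP_NORMAL,
                               LP_NORMAL };
        int         redirect_to[5] = { 0 };

        /* decisions from the scan phase (hypothetical) */
        int         redirected[] = { 1, 3 };    /* pairs: from, to */
        int         nowdead[] = { 2 };
        int         nowunused[] = { 4 };
        int         nredirected = 1, ndead = 1, nunused = 1;

        for (int i = 0; i < nredirected; i++)
        {
            int     from = redirected[2 * i];
            int     to = redirected[2 * i + 1];

            lp[from] = LP_REDIRECT;
            redirect_to[from] = to;
        }
        for (int i = 0; i < ndead; i++)
            lp[nowdead[i]] = LP_DEAD;
        for (int i = 0; i < nunused; i++)
            lp[nowunused[i]] = LP_UNUSED;

        printf("1 -> redirect(%d), 2 -> dead, 4 -> unused\n", redirect_to[1]);
        return 0;
    }
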
    1766             : 
    1767             : /*
    1768             :  * If built with assertions, verify that all LP_REDIRECT items point to a
    1769             :  * valid item.
    1770             :  *
    1771             :  * One way that bugs related to HOT pruning show up is redirect items pointing to
    1772             :  * removed tuples. It's not trivial to reliably check that marking an item
    1773             :  * unused will not orphan a redirect item during heap_prune_chain() /
    1774             :  * heap_page_prune_execute(), so we additionally check the whole page after
    1775             :  * pruning. Without this check such bugs would typically only cause asserts
    1776             :  * later, potentially well after the corruption has been introduced.
    1777             :  *
    1778             :  * Also check comments in heap_page_prune_execute()'s redirection loop.
    1779             :  */
    1780             : static void
    1781      118850 : page_verify_redirects(Page page)
    1782             : {
    1783             : #ifdef USE_ASSERT_CHECKING
    1784             :     OffsetNumber offnum;
    1785             :     OffsetNumber maxoff;
    1786             : 
    1787             :     maxoff = PageGetMaxOffsetNumber(page);
    1788             :     for (offnum = FirstOffsetNumber;
    1789             :          offnum <= maxoff;
    1790             :          offnum = OffsetNumberNext(offnum))
    1791             :     {
    1792             :         ItemId      itemid = PageGetItemId(page, offnum);
    1793             :         OffsetNumber targoff;
    1794             :         ItemId      targitem;
    1795             :         HeapTupleHeader htup;
    1796             : 
    1797             :         if (!ItemIdIsRedirected(itemid))
    1798             :             continue;
    1799             : 
    1800             :         targoff = ItemIdGetRedirect(itemid);
    1801             :         targitem = PageGetItemId(page, targoff);
    1802             : 
    1803             :         Assert(ItemIdIsUsed(targitem));
    1804             :         Assert(ItemIdIsNormal(targitem));
    1805             :         Assert(ItemIdHasStorage(targitem));
    1806             :         htup = (HeapTupleHeader) PageGetItem(page, targitem);
    1807             :         Assert(HeapTupleHeaderIsHeapOnly(htup));
    1808             :     }
    1809             : #endif
    1810      118850 : }
    1811             : 
    1812             : 
    1813             : /*
    1814             :  * For all items in this page, find their respective root line pointers.
    1815             :  * If item k is part of a HOT-chain with root at item j, then we set
    1816             :  * root_offsets[k - 1] = j.
    1817             :  *
    1818             :  * The passed-in root_offsets array must have MaxHeapTuplesPerPage entries.
    1819             :  * Unused entries are filled with InvalidOffsetNumber (zero).
    1820             :  *
    1821             :  * The function must be called with at least share lock on the buffer, to
    1822             :  * prevent concurrent prune operations.
    1823             :  *
    1824             :  * Note: The information collected here is valid only as long as the caller
    1825             :  * holds a pin on the buffer.  Once the pin is released, a tuple might be
    1826             :  * pruned away and its line pointer reused for a completely unrelated tuple.
    1827             :  */
    1828             : void
    1829      222892 : heap_get_root_tuples(Page page, OffsetNumber *root_offsets)
    1830             : {
    1831             :     OffsetNumber offnum,
    1832             :                 maxoff;
    1833             : 
    1834      222892 :     MemSet(root_offsets, InvalidOffsetNumber,
    1835             :            MaxHeapTuplesPerPage * sizeof(OffsetNumber));
    1836             : 
    1837      222892 :     maxoff = PageGetMaxOffsetNumber(page);
    1838    18010986 :     for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
    1839             :     {
    1840    17788094 :         ItemId      lp = PageGetItemId(page, offnum);
    1841             :         HeapTupleHeader htup;
    1842             :         OffsetNumber nextoffnum;
    1843             :         TransactionId priorXmax;
    1844             : 
    1845             :         /* skip unused and dead items */
    1846    17788094 :         if (!ItemIdIsUsed(lp) || ItemIdIsDead(lp))
    1847       22154 :             continue;
    1848             : 
    1849    17765940 :         if (ItemIdIsNormal(lp))
    1850             :         {
    1851    17757452 :             htup = (HeapTupleHeader) PageGetItem(page, lp);
    1852             : 
    1853             :             /*
    1854             :              * Check if this tuple is part of a HOT-chain rooted at some other
    1855             :              * tuple. If so, skip it for now; we'll process it when we find
    1856             :              * its root.
    1857             :              */
    1858    17757452 :             if (HeapTupleHeaderIsHeapOnly(htup))
    1859        8980 :                 continue;
    1860             : 
    1861             :             /*
    1862             :              * This is either a plain tuple or the root of a HOT-chain.
    1863             :              * Remember it in the mapping.
    1864             :              */
    1865    17748472 :             root_offsets[offnum - 1] = offnum;
    1866             : 
    1867             :             /* If it's not the start of a HOT-chain, we're done with it */
    1868    17748472 :             if (!HeapTupleHeaderIsHotUpdated(htup))
    1869    17748078 :                 continue;
    1870             : 
    1871             :             /* Set up to scan the HOT-chain */
    1872         394 :             nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
    1873         394 :             priorXmax = HeapTupleHeaderGetUpdateXid(htup);
    1874             :         }
    1875             :         else
    1876             :         {
    1877             :             /* Must be a redirect item. We do not set its root_offsets entry */
    1878             :             Assert(ItemIdIsRedirected(lp));
    1879             :             /* Set up to scan the HOT-chain */
    1880        8488 :             nextoffnum = ItemIdGetRedirect(lp);
    1881        8488 :             priorXmax = InvalidTransactionId;
    1882             :         }
    1883             : 
    1884             :         /*
    1885             :          * Now follow the HOT-chain and collect other tuples in the chain.
    1886             :          *
    1887             :  * Note: Even though this is a nested loop, the overall complexity of
    1888             :  * the function is O(N), because each tuple on the page is visited at
    1889             :  * most twice: once by the outer loop and once while chasing a HOT
    1890             :  * chain.
    1891             :          */
    1892             :         for (;;)
    1893             :         {
    1894             :             /* Sanity check (pure paranoia) */
    1895        8974 :             if (nextoffnum < FirstOffsetNumber)
    1896           0 :                 break;
    1897             : 
    1898             :             /*
    1899             :              * An offset past the end of page's line pointer array is possible
    1900             :              * when the array was truncated
    1901             :              */
    1902        8974 :             if (nextoffnum > maxoff)
    1903           0 :                 break;
    1904             : 
    1905        8974 :             lp = PageGetItemId(page, nextoffnum);
    1906             : 
    1907             :             /* Check for broken chains */
    1908        8974 :             if (!ItemIdIsNormal(lp))
    1909           0 :                 break;
    1910             : 
    1911        8974 :             htup = (HeapTupleHeader) PageGetItem(page, lp);
    1912             : 
    1913        9460 :             if (TransactionIdIsValid(priorXmax) &&
    1914         486 :                 !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(htup)))
    1915           0 :                 break;
    1916             : 
    1917             :             /* Remember the root line pointer for this item */
    1918        8974 :             root_offsets[nextoffnum - 1] = offnum;
    1919             : 
    1920             :             /* Advance to next chain member, if any */
    1921        8974 :             if (!HeapTupleHeaderIsHotUpdated(htup))
    1922        8882 :                 break;
    1923             : 
    1924             :             /* HOT implies it can't have moved to different partition */
    1925             :             Assert(!HeapTupleHeaderIndicatesMovedPartitions(htup));
    1926             : 
    1927          92 :             nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
    1928          92 :             priorXmax = HeapTupleHeaderGetUpdateXid(htup);
    1929             :         }
    1930             :     }
    1931      222892 : }
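                     : 
                     : /*
                     :  * A minimal usage sketch, not part of pruneheap.c: how a caller might use
                     :  * heap_get_root_tuples() to translate a heap-only tuple's offset into its
                     :  * root line pointer.  The helper name and buffer handling are assumptions
                     :  * for illustration; the hard requirements are only the share lock (or
                     :  * stronger) during the call, and the pin for as long as root_offsets is
                     :  * consulted.
                     :  */
                     : static OffsetNumber
                     : sketch_root_of(Buffer buf, OffsetNumber offnum)
                     : {
                     :     OffsetNumber root_offsets[MaxHeapTuplesPerPage];
                     : 
                     :     LockBuffer(buf, BUFFER_LOCK_SHARE);
                     :     heap_get_root_tuples(BufferGetPage(buf), root_offsets);
                     :     LockBuffer(buf, BUFFER_LOCK_UNLOCK);
                     : 
                     :     /* Valid only while the caller still holds a pin on buf */
                     :     return root_offsets[offnum - 1];
                     : }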
    1932             : 
    1933             : 
    1934             : /*
    1935             :  * Compare the fields that describe the actions required to freeze a tuple
    1936             :  * with the caller's open plan.  If everything matches, then the frz tuple's
    1937             :  * plan is equivalent to the caller's plan.
    1938             :  */
    1939             : static inline bool
    1940     1836928 : heap_log_freeze_eq(xlhp_freeze_plan *plan, HeapTupleFreeze *frz)
    1941             : {
    1942     1836928 :     if (plan->xmax == frz->xmax &&
    1943     1834342 :         plan->t_infomask2 == frz->t_infomask2 &&
    1944     1832680 :         plan->t_infomask == frz->t_infomask &&
    1945     1827686 :         plan->frzflags == frz->frzflags)
    1946     1827686 :         return true;
    1947             : 
    1948             :     /* Caller must call heap_log_freeze_new_plan again for frz */
    1949        9242 :     return false;
    1950             : }
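                     : 
                     : /*
                     :  * Why exact equality is the right test: during REDO, a plan's fields are
                     :  * copied verbatim into every tuple header in its offset group, roughly
                     :  * like this (a sketch modeled on heap_execute_freeze_tuple()):
                     :  *
                     :  *     HeapTupleHeaderSetXmax(htup, plan->xmax);
                     :  *     htup->t_infomask = plan->t_infomask;
                     :  *     htup->t_infomask2 = plan->t_infomask2;
                     :  *
                     :  * so two tuples may share a plan only if all four fields match exactly.
                     :  */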
    1951             : 
    1952             : /*
    1953             :  * Comparator used to deduplicate the freeze plans used in WAL records.
    1954             :  */
    1955             : static int
    1956     2563982 : heap_log_freeze_cmp(const void *arg1, const void *arg2)
    1957             : {
    1958     2563982 :     HeapTupleFreeze *frz1 = (HeapTupleFreeze *) arg1;
    1959     2563982 :     HeapTupleFreeze *frz2 = (HeapTupleFreeze *) arg2;
    1960             : 
    1961     2563982 :     if (frz1->xmax < frz2->xmax)
    1962       26330 :         return -1;
    1963     2537652 :     else if (frz1->xmax > frz2->xmax)
    1964       28602 :         return 1;
    1965             : 
    1966     2509050 :     if (frz1->t_infomask2 < frz2->t_infomask2)
    1967        8530 :         return -1;
    1968     2500520 :     else if (frz1->t_infomask2 > frz2->t_infomask2)
    1969        8794 :         return 1;
    1970             : 
    1971     2491726 :     if (frz1->t_infomask < frz2->t_infomask)
    1972       22320 :         return -1;
    1973     2469406 :     else if (frz1->t_infomask > frz2->t_infomask)
    1974       30946 :         return 1;
    1975             : 
    1976     2438460 :     if (frz1->frzflags < frz2->frzflags)
    1977           0 :         return -1;
    1978     2438460 :     else if (frz1->frzflags > frz2->frzflags)
    1979           0 :         return 1;
    1980             : 
    1981             :     /*
    1982             :      * heap_log_freeze_eq would consider these tuple-wise plans to be equal.
    1983             :      * (So the tuples will share a single canonical freeze plan.)
    1984             :      *
    1985             :      * We tiebreak on page offset number to keep each freeze plan's page
    1986             :      * offset number array individually sorted. (Unnecessary, but be tidy.)
    1987             :      */
    1988     2438460 :     if (frz1->offset < frz2->offset)
    1989     2050872 :         return -1;
    1990      387588 :     else if (frz1->offset > frz2->offset)
    1991      387588 :         return 1;
    1992             : 
    1993             :     Assert(false);
    1994           0 :     return 0;
    1995             : }
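                     : 
                     : /*
                     :  * A worked example, with made-up values: after qsort() with this
                     :  * comparator, tuples whose (xmax, t_infomask2, t_infomask, frzflags)
                     :  * fields all match become adjacent, ordered by offset within each run:
                     :  *
                     :  *     before: {xmax=10, off=7} {xmax=12, off=2} {xmax=10, off=3}
                     :  *     after:  {xmax=10, off=3} {xmax=10, off=7} {xmax=12, off=2}
                     :  *
                     :  * which lets heap_log_freeze_plan() deduplicate in one linear scan.
                     :  */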
    1996             : 
    1997             : /*
    1998             :  * Start a new plan, initialized from one tuple's freeze actions.  At least
    1999             :  * one tuple's freezing steps will be described by this plan during REDO.
    2000             :  */
    2001             : static inline void
    2002       54002 : heap_log_freeze_new_plan(xlhp_freeze_plan *plan, HeapTupleFreeze *frz)
    2003             : {
    2004       54002 :     plan->xmax = frz->xmax;
    2005       54002 :     plan->t_infomask2 = frz->t_infomask2;
    2006       54002 :     plan->t_infomask = frz->t_infomask;
    2007       54002 :     plan->frzflags = frz->frzflags;
    2008       54002 :     plan->ntuples = 1;           /* for now */
    2009       54002 : }
    2010             : 
    2011             : /*
    2012             :  * Deduplicate tuple-based freeze plans so that each distinct set of
    2013             :  * processing steps is only stored once in the WAL record.
    2014             :  * Called during original execution of freezing (for logged relations).
    2015             :  *
    2016             :  * The return value is the number of plans written to *plans_out.  This also
    2017             :  * writes an array of offset numbers into the *offsets_out output argument
    2018             :  * (logically there is one such array per freeze plan, but that distinction
    2019             :  * is of no immediate concern to our caller).
    2020             :  */
    2021             : static int
    2022       44760 : heap_log_freeze_plan(HeapTupleFreeze *tuples, int ntuples,
    2023             :                      xlhp_freeze_plan *plans_out,
    2024             :                      OffsetNumber *offsets_out)
    2025             : {
    2026       44760 :     int         nplans = 0;
    2027             : 
    2028             :     /* Sort tuple-based freeze plans in the order required to deduplicate */
    2029       44760 :     qsort(tuples, ntuples, sizeof(HeapTupleFreeze), heap_log_freeze_cmp);
    2030             : 
    2031     1926448 :     for (int i = 0; i < ntuples; i++)
    2032             :     {
    2033     1881688 :         HeapTupleFreeze *frz = tuples + i;
    2034             : 
    2035     1881688 :         if (i == 0)
    2036             :         {
    2037             :             /* New canonical freeze plan starting with first tup */
    2038       44760 :             heap_log_freeze_new_plan(plans_out, frz);
    2039       44760 :             nplans++;
    2040             :         }
    2041     1836928 :         else if (heap_log_freeze_eq(plans_out, frz))
    2042             :         {
    2043             :             /* tup matches open canonical plan -- include tup in it */
    2044             :             Assert(offsets_out[i - 1] < frz->offset);
    2045     1827686 :             plans_out->ntuples++;
    2046             :         }
    2047             :         else
    2048             :         {
    2049             :             /* Tup doesn't match current plan -- done with it now */
    2050        9242 :             plans_out++;
    2051             : 
    2052             :             /* New canonical freeze plan starting with this tup */
    2053        9242 :             heap_log_freeze_new_plan(plans_out, frz);
    2054        9242 :             nplans++;
    2055             :         }
    2056             : 
    2057             :         /*
    2058             :          * Save page offset number in dedicated buffer in passing.
    2059             :          *
    2060             :          * REDO routine relies on the record's offset numbers array grouping
    2061             :          * offset numbers by freeze plan.  The sort order within each grouping
    2062             :          * is ascending offset number order, just to keep things tidy.
    2063             :          */
    2064     1881688 :         offsets_out[i] = frz->offset;
    2065             :     }
    2066             : 
    2067             :     Assert(nplans > 0 && nplans <= ntuples);
    2068             : 
    2069       44760 :     return nplans;
    2070             : }
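                     : 
                     : /*
                     :  * A hypothetical self-check sketch (assumed values, not in PostgreSQL)
                     :  * showing the deduplication contract: three tuples, two of which share
                     :  * identical freeze fields, collapse into two plans, with the offsets
                     :  * array grouped by plan and ascending within each group.
                     :  */
                     : static void
                     : sketch_dedup_demo(void)
                     : {
                     :     HeapTupleFreeze tuples[3] = {0};
                     :     xlhp_freeze_plan plans[3];
                     :     OffsetNumber offsets[3];
                     :     int         nplans;
                     : 
                     :     tuples[0].xmax = 10;
                     :     tuples[0].offset = 7;
                     :     tuples[1].xmax = 12;
                     :     tuples[1].offset = 2;
                     :     tuples[2].xmax = 10;
                     :     tuples[2].offset = 3;
                     : 
                     :     nplans = heap_log_freeze_plan(tuples, 3, plans, offsets);
                     : 
                     :     Assert(nplans == 2);
                     :     Assert(plans[0].ntuples == 2 && plans[1].ntuples == 1);
                     :     /* offsets is now {3, 7, 2}: the xmax=10 group, then xmax=12 */
                     : }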
    2071             : 
    2072             : /*
    2073             :  * Write an XLOG_HEAP2_PRUNE* WAL record
    2074             :  *
    2075             :  * This is used for several different page maintenance operations:
    2076             :  *
    2077             :  * - Page pruning, in VACUUM's 1st pass or on access: Some items are
    2078             :  *   redirected, some marked dead, and some removed altogether.
    2079             :  *
    2080             :  * - Freezing: Items are marked as 'frozen'.
    2081             :  *
    2082             :  * - Vacuum, 2nd pass: Items that are already LP_DEAD are marked as unused.
    2083             :  *
    2084             :  * They have enough commonalities that we use a single WAL record for them
    2085             :  * all.
    2086             :  *
    2087             :  * If replaying the record requires a cleanup lock, pass cleanup_lock = true.
    2088             :  * Replaying 'redirected' or 'dead' items always requires a cleanup lock, but
    2089             :  * replaying 'unused' items depends on whether they were all previously marked
    2090             :  * as dead.
    2091             :  *
    2092             :  * If the VM is being updated, vmflags will contain the bits to set. In this
    2093             :  * case, vmbuffer should already have been updated and marked dirty and should
    2094             :  * still be pinned and locked.
    2095             :  *
    2096             :  * Note: This function scribbles on the 'frozen' array.
    2097             :  *
    2098             :  * Note: This is called in a critical section, so be careful what you do here.
    2099             :  */
    2100             : void
    2101      167478 : log_heap_prune_and_freeze(Relation relation, Buffer buffer,
    2102             :                           Buffer vmbuffer, uint8 vmflags,
    2103             :                           TransactionId conflict_xid,
    2104             :                           bool cleanup_lock,
    2105             :                           PruneReason reason,
    2106             :                           HeapTupleFreeze *frozen, int nfrozen,
    2107             :                           OffsetNumber *redirected, int nredirected,
    2108             :                           OffsetNumber *dead, int ndead,
    2109             :                           OffsetNumber *unused, int nunused)
    2110             : {
    2111             :     xl_heap_prune xlrec;
    2112             :     XLogRecPtr  recptr;
    2113             :     uint8       info;
    2114             :     uint8       regbuf_flags_heap;
    2115             : 
    2116             :     /* The following local variables hold data registered in the WAL record: */
    2117             :     xlhp_freeze_plan plans[MaxHeapTuplesPerPage];
    2118             :     xlhp_freeze_plans freeze_plans;
    2119             :     xlhp_prune_items redirect_items;
    2120             :     xlhp_prune_items dead_items;
    2121             :     xlhp_prune_items unused_items;
    2122             :     OffsetNumber frz_offsets[MaxHeapTuplesPerPage];
    2123      167478 :     bool        do_prune = nredirected > 0 || ndead > 0 || nunused > 0;
    2124      167478 :     bool        do_set_vm = vmflags & VISIBILITYMAP_VALID_BITS;
    2125             : 
    2126             :     Assert((vmflags & VISIBILITYMAP_VALID_BITS) == vmflags);
    2127             : 
    2128      167478 :     xlrec.flags = 0;
    2129      167478 :     regbuf_flags_heap = REGBUF_STANDARD;
    2130             : 
    2131             :     /*
    2132             :      * We can avoid an FPI of the heap page if the only modification we are
    2133             :      * making to it is to set PD_ALL_VISIBLE and checksums/wal_log_hints are
    2134             :      * disabled. Note that if we explicitly skip an FPI, we must not stamp the
    2135             :      * heap page with this record's LSN. Recovery skips records <= the stamped
    2136             :      * LSN, so this could lead to skipping an earlier FPI needed to repair a
    2137             :      * torn page.
    2138             :      */
    2139      167478 :     if (!do_prune &&
    2140           0 :         nfrozen == 0 &&
    2141           0 :         (!do_set_vm || !XLogHintBitIsNeeded()))
    2142           0 :         regbuf_flags_heap |= REGBUF_NO_IMAGE;
    2143             : 
    2144             :     /*
    2145             :      * Prepare data for the buffer.  The arrays are not actually in the
    2146             :      * buffer, but we pretend that they are.  When XLogInsert stores a full
    2147             :      * page image, the arrays can be omitted.
    2148             :      */
    2149      167478 :     XLogBeginInsert();
    2150      167478 :     XLogRegisterBuffer(0, buffer, regbuf_flags_heap);
    2151             : 
    2152      167478 :     if (do_set_vm)
    2153       23434 :         XLogRegisterBuffer(1, vmbuffer, 0);
    2154             : 
    2155      167478 :     if (nfrozen > 0)
    2156             :     {
    2157             :         int         nplans;
    2158             : 
    2159       44760 :         xlrec.flags |= XLHP_HAS_FREEZE_PLANS;
    2160             : 
    2161             :         /*
    2162             :          * Prepare deduplicated representation for use in the WAL record. This
    2163             :          * destructively sorts frozen tuples array in-place.
    2164             :          */
    2165       44760 :         nplans = heap_log_freeze_plan(frozen, nfrozen, plans, frz_offsets);
    2166             : 
    2167       44760 :         freeze_plans.nplans = nplans;
    2168       44760 :         XLogRegisterBufData(0, &freeze_plans,
    2169             :                             offsetof(xlhp_freeze_plans, plans));
    2170       44760 :         XLogRegisterBufData(0, plans,
    2171             :                             sizeof(xlhp_freeze_plan) * nplans);
    2172             :     }
    2173      167478 :     if (nredirected > 0)
    2174             :     {
    2175       33364 :         xlrec.flags |= XLHP_HAS_REDIRECTIONS;
    2176             : 
    2177       33364 :         redirect_items.ntargets = nredirected;
    2178       33364 :         XLogRegisterBufData(0, &redirect_items,
    2179             :                             offsetof(xlhp_prune_items, data));
    2180       33364 :         XLogRegisterBufData(0, redirected,
    2181             :                             sizeof(OffsetNumber[2]) * nredirected);
    2182             :     }
    2183      167478 :     if (ndead > 0)
    2184             :     {
    2185       76234 :         xlrec.flags |= XLHP_HAS_DEAD_ITEMS;
    2186             : 
    2187       76234 :         dead_items.ntargets = ndead;
    2188       76234 :         XLogRegisterBufData(0, &dead_items,
    2189             :                             offsetof(xlhp_prune_items, data));
    2190       76234 :         XLogRegisterBufData(0, dead,
    2191             :                             sizeof(OffsetNumber) * ndead);
    2192             :     }
    2193      167478 :     if (nunused > 0)
    2194             :     {
    2195       45688 :         xlrec.flags |= XLHP_HAS_NOW_UNUSED_ITEMS;
    2196             : 
    2197       45688 :         unused_items.ntargets = nunused;
    2198       45688 :         XLogRegisterBufData(0, &unused_items,
    2199             :                             offsetof(xlhp_prune_items, data));
    2200       45688 :         XLogRegisterBufData(0, unused,
    2201             :                             sizeof(OffsetNumber) * nunused);
    2202             :     }
    2203      167478 :     if (nfrozen > 0)
    2204       44760 :         XLogRegisterBufData(0, frz_offsets,
    2205             :                             sizeof(OffsetNumber) * nfrozen);
    2206             : 
    2207             :     /*
    2208             :      * Prepare the main xl_heap_prune record.  We already set the XLHP_HAS_*
    2209             :      * flag above.
    2210             :      */
    2211      167478 :     if (vmflags & VISIBILITYMAP_ALL_VISIBLE)
    2212             :     {
    2213       23434 :         xlrec.flags |= XLHP_VM_ALL_VISIBLE;
    2214       23434 :         if (vmflags & VISIBILITYMAP_ALL_FROZEN)
    2215       18760 :             xlrec.flags |= XLHP_VM_ALL_FROZEN;
    2216             :     }
    2217      167478 :     if (RelationIsAccessibleInLogicalDecoding(relation))
    2218        1286 :         xlrec.flags |= XLHP_IS_CATALOG_REL;
    2219      167478 :     if (TransactionIdIsValid(conflict_xid))
    2220      138414 :         xlrec.flags |= XLHP_HAS_CONFLICT_HORIZON;
    2221      167478 :     if (cleanup_lock)
    2222      143746 :         xlrec.flags |= XLHP_CLEANUP_LOCK;
    2223             :     else
    2224             :     {
    2225             :         Assert(nredirected == 0 && ndead == 0);
    2226             :         /* also, any items in 'unused' must've been LP_DEAD previously */
    2227             :     }
    2228      167478 :     XLogRegisterData(&xlrec, SizeOfHeapPrune);
    2229      167478 :     if (TransactionIdIsValid(conflict_xid))
    2230      138414 :         XLogRegisterData(&conflict_xid, sizeof(TransactionId));
    2231             : 
    2232      167478 :     switch (reason)
    2233             :     {
    2234       82680 :         case PRUNE_ON_ACCESS:
    2235       82680 :             info = XLOG_HEAP2_PRUNE_ON_ACCESS;
    2236       82680 :             break;
    2237       61066 :         case PRUNE_VACUUM_SCAN:
    2238       61066 :             info = XLOG_HEAP2_PRUNE_VACUUM_SCAN;
    2239       61066 :             break;
    2240       23732 :         case PRUNE_VACUUM_CLEANUP:
    2241       23732 :             info = XLOG_HEAP2_PRUNE_VACUUM_CLEANUP;
    2242       23732 :             break;
    2243           0 :         default:
    2244           0 :             elog(ERROR, "unrecognized prune reason: %d", (int) reason);
    2245             :             break;
    2246             :     }
    2247      167478 :     recptr = XLogInsert(RM_HEAP2_ID, info);
    2248             : 
    2249      167478 :     if (do_set_vm)
    2250             :     {
    2251             :         Assert(BufferIsDirty(vmbuffer));
    2252       23434 :         PageSetLSN(BufferGetPage(vmbuffer), recptr);
    2253             :     }
    2254             : 
    2255             :     /*
    2256             :      * See comment at the top of the function about regbuf_flags_heap for
    2257             :      * details on when we can advance the page LSN.
    2258             :      */
    2259      167478 :     if (do_prune || nfrozen > 0 || (do_set_vm && XLogHintBitIsNeeded()))
    2260             :     {
    2261             :         Assert(BufferIsDirty(buffer));
    2262      167478 :         PageSetLSN(BufferGetPage(buffer), recptr);
    2263             :     }
    2264      167478 : }
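                     : 
                     : /*
                     :  * A minimal caller sketch, not part of this file, under stated
                     :  * assumptions: the page changes (e.g. marking items LP_DEAD) have
                     :  * already been applied inside the critical section, no VM update is
                     :  * requested, and the function name and arrays below are illustrative.
                     :  * PageSetLSN() on the heap page is handled by the logging routine.
                     :  */
                     : static void
                     : sketch_log_prune(Relation rel, Buffer buf,
                     :                  TransactionId conflict_xid,
                     :                  OffsetNumber *dead, int ndead)
                     : {
                     :     START_CRIT_SECTION();
                     : 
                     :     /* ... apply the prune to the page here ... */
                     :     MarkBufferDirty(buf);
                     : 
                     :     if (RelationNeedsWAL(rel))
                     :         log_heap_prune_and_freeze(rel, buf,
                     :                                   InvalidBuffer, 0,  /* no VM change */
                     :                                   conflict_xid,
                     :                                   true,              /* cleanup lock held */
                     :                                   PRUNE_ON_ACCESS,
                     :                                   NULL, 0,           /* nothing frozen */
                     :                                   NULL, 0,           /* no redirects */
                     :                                   dead, ndead,
                     :                                   NULL, 0);          /* none set unused */
                     : 
                     :     END_CRIT_SECTION();
                     : }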

Generated by: LCOV version 1.16