LCOV - code coverage report
Current view: top level - src/include/access - heapam.h (source / functions)
Test: PostgreSQL 19devel          Date: 2025-11-21 05:17:49
Coverage:  Lines: 7 of 9 hit (77.8 %)    Functions: 1 of 1 hit (100.0 %)
Legend: Lines: hit | not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * heapam.h
       4             :  *    POSTGRES heap access method definitions.
       5             :  *
       6             :  *
       7             :  * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
       8             :  * Portions Copyright (c) 1994, Regents of the University of California
       9             :  *
      10             :  * src/include/access/heapam.h
      11             :  *
      12             :  *-------------------------------------------------------------------------
      13             :  */
      14             : #ifndef HEAPAM_H
      15             : #define HEAPAM_H
      16             : 
      17             : #include "access/heapam_xlog.h"
      18             : #include "access/relation.h"  /* for backward compatibility */
      19             : #include "access/relscan.h"
      20             : #include "access/sdir.h"
      21             : #include "access/skey.h"
      22             : #include "access/table.h"     /* for backward compatibility */
      23             : #include "access/tableam.h"
      24             : #include "commands/vacuum.h"
      25             : #include "nodes/lockoptions.h"
      26             : #include "nodes/primnodes.h"
      27             : #include "storage/bufpage.h"
      28             : #include "storage/dsm.h"
      29             : #include "storage/lockdefs.h"
      30             : #include "storage/read_stream.h"
      31             : #include "storage/shm_toc.h"
      32             : #include "utils/relcache.h"
      33             : #include "utils/snapshot.h"
      34             : 
      35             : 
      36             : /* "options" flag bits for heap_insert */
      37             : #define HEAP_INSERT_SKIP_FSM    TABLE_INSERT_SKIP_FSM
      38             : #define HEAP_INSERT_FROZEN      TABLE_INSERT_FROZEN
      39             : #define HEAP_INSERT_NO_LOGICAL  TABLE_INSERT_NO_LOGICAL
      40             : #define HEAP_INSERT_SPECULATIVE 0x0010
      41             : 
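/*
 * Illustrative sketch, not part of heapam.h: the flag bits above are ORed
 * together and passed as the 'options' argument of heap_insert() or
 * heap_multi_insert().  For example, a bulk load into a relation created in
 * the current (sub)transaction might skip FSM lookups and insert its tuples
 * already frozen.  The helper name and calling context are hypothetical;
 * GetCurrentCommandId() comes from access/xact.h.
 */
static void
load_tuple_frozen(Relation rel, HeapTuple tup, BulkInsertState bistate)
{
    int         options = HEAP_INSERT_SKIP_FSM | HEAP_INSERT_FROZEN;

    /* cid identifies the inserting command within the current transaction */
    heap_insert(rel, tup, GetCurrentCommandId(true), options, bistate);
}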
      42             : /* "options" flag bits for heap_page_prune_and_freeze */
      43             : #define HEAP_PAGE_PRUNE_MARK_UNUSED_NOW     (1 << 0)
      44             : #define HEAP_PAGE_PRUNE_FREEZE              (1 << 1)
      45             : 
      46             : typedef struct BulkInsertStateData *BulkInsertState;
      47             : typedef struct GlobalVisState GlobalVisState;
      48             : typedef struct TupleTableSlot TupleTableSlot;
      49             : struct VacuumCutoffs;
      50             : 
      51             : #define MaxLockTupleMode    LockTupleExclusive
      52             : 
      53             : /*
      54             :  * Descriptor for heap table scans.
      55             :  */
      56             : typedef struct HeapScanDescData
      57             : {
      58             :     TableScanDescData rs_base;  /* AM independent part of the descriptor */
      59             : 
      60             :     /* state set up at initscan time */
      61             :     BlockNumber rs_nblocks;     /* total number of blocks in rel */
      62             :     BlockNumber rs_startblock;  /* block # to start at */
      63             :     BlockNumber rs_numblocks;   /* max number of blocks to scan */
      64             :     /* rs_numblocks is usually InvalidBlockNumber, meaning "scan whole rel" */
      65             : 
      66             :     /* scan current state */
      67             :     bool        rs_inited;      /* false = scan not init'd yet */
      68             :     OffsetNumber rs_coffset;    /* current offset # in non-page-at-a-time mode */
      69             :     BlockNumber rs_cblock;      /* current block # in scan, if any */
      70             :     Buffer      rs_cbuf;        /* current buffer in scan, if any */
      71             :     /* NB: if rs_cbuf is not InvalidBuffer, we hold a pin on that buffer */
      72             : 
      73             :     BufferAccessStrategy rs_strategy;   /* access strategy for reads */
      74             : 
      75             :     HeapTupleData rs_ctup;      /* current tuple in scan, if any */
      76             : 
      77             :     /* For scans that stream reads */
      78             :     ReadStream *rs_read_stream;
      79             : 
      80             :     /*
      81             :      * Used by sequential scans and TID range scans to stream reads. The read
      82             :      * stream is allocated at the beginning of the scan and reset on rescan or
      83             :      * when the scan direction changes. The scan direction is saved each time
      84             :      * a new page is requested. If the scan direction changes from one page to
      85             :      * the next, the read stream releases all previously pinned buffers and
      86             :      * resets the prefetch block.
      87             :      */
      88             :     ScanDirection rs_dir;
      89             :     BlockNumber rs_prefetch_block;
      90             : 
      91             :     /*
      92             :      * For parallel scans to store page allocation data.  NULL when not
      93             :      * performing a parallel scan.
      94             :      */
      95             :     ParallelBlockTableScanWorkerData *rs_parallelworkerdata;
      96             : 
      97             :     /* these fields only used in page-at-a-time mode and for bitmap scans */
      98             :     uint32      rs_cindex;      /* current tuple's index in vistuples */
      99             :     uint32      rs_ntuples;     /* number of visible tuples on page */
     100             :     OffsetNumber rs_vistuples[MaxHeapTuplesPerPage];    /* their offsets */
     101             : } HeapScanDescData;
     102             : typedef struct HeapScanDescData *HeapScanDesc;
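/*
 * Illustrative sketch, not part of heapam.h: a minimal forward sequential
 * scan built on the descriptor above.  It assumes the caller already holds a
 * suitable lock on the relation and has a snapshot to scan with; the helper
 * name is hypothetical, and the SO_* flag values come from access/tableam.h.
 */
static void
scan_whole_heap(Relation rel, Snapshot snapshot)
{
    TableScanDesc scan;
    HeapTuple   tuple;

    scan = heap_beginscan(rel, snapshot, 0, NULL, NULL,
                          SO_TYPE_SEQSCAN | SO_ALLOW_STRAT |
                          SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE);

    while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
    {
        /*
         * Process the visible tuple.  While we are here, the underlying
         * HeapScanDescData's rs_cbuf holds a pin on the current page.
         */
    }

    heap_endscan(scan);
}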
     103             : 
     104             : typedef struct BitmapHeapScanDescData
     105             : {
     106             :     HeapScanDescData rs_heap_base;
     107             : 
     108             :     /* Holds no data */
     109             : }           BitmapHeapScanDescData;
     110             : typedef struct BitmapHeapScanDescData *BitmapHeapScanDesc;
     111             : 
     112             : /*
     113             :  * Descriptor for fetches from heap via an index.
     114             :  */
     115             : typedef struct IndexFetchHeapData
     116             : {
     117             :     IndexFetchTableData xs_base;    /* AM independent part of the descriptor */
     118             : 
     119             :     Buffer      xs_cbuf;        /* current heap buffer in scan, if any */
     120             :     /* NB: if xs_cbuf is not InvalidBuffer, we hold a pin on that buffer */
     121             : } IndexFetchHeapData;
     122             : 
     123             : /* Result codes for HeapTupleSatisfiesVacuum */
     124             : typedef enum
     125             : {
     126             :     HEAPTUPLE_DEAD,             /* tuple is dead and deletable */
     127             :     HEAPTUPLE_LIVE,             /* tuple is live (committed, no deleter) */
     128             :     HEAPTUPLE_RECENTLY_DEAD,    /* tuple is dead, but not deletable yet */
     129             :     HEAPTUPLE_INSERT_IN_PROGRESS,   /* inserting xact is still in progress */
     130             :     HEAPTUPLE_DELETE_IN_PROGRESS,   /* deleting xact is still in progress */
     131             : } HTSV_Result;
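/*
 * Illustrative sketch, not part of heapam.h: how a VACUUM-like caller might
 * interpret these result codes.  HeapTupleSatisfiesVacuum() is declared
 * later in this header; the caller is assumed to hold a content lock on
 * 'buf', and the helper name and counters are hypothetical.
 */
static void
classify_tuple(HeapTuple tup, TransactionId OldestXmin, Buffer buf,
               int *ndead, int *nkept)
{
    switch (HeapTupleSatisfiesVacuum(tup, OldestXmin, buf))
    {
        case HEAPTUPLE_DEAD:
            (*ndead)++;         /* storage can be reclaimed */
            break;
        case HEAPTUPLE_LIVE:
        case HEAPTUPLE_RECENTLY_DEAD:
        case HEAPTUPLE_INSERT_IN_PROGRESS:
        case HEAPTUPLE_DELETE_IN_PROGRESS:
            (*nkept)++;         /* must be kept, at least for now */
            break;
    }
}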
     132             : 
     133             : /*
      134             :  * heap_prepare_freeze_tuple may request that heap_pre_freeze_checks
     135             :  * check any tuple's to-be-frozen xmin and/or xmax status using pg_xact
     136             :  */
     137             : #define     HEAP_FREEZE_CHECK_XMIN_COMMITTED    0x01
     138             : #define     HEAP_FREEZE_CHECK_XMAX_ABORTED      0x02
     139             : 
     140             : /* heap_prepare_freeze_tuple state describing how to freeze a tuple */
     141             : typedef struct HeapTupleFreeze
     142             : {
     143             :     /* Fields describing how to process tuple */
     144             :     TransactionId xmax;
     145             :     uint16      t_infomask2;
     146             :     uint16      t_infomask;
     147             :     uint8       frzflags;
     148             : 
     149             :     /* xmin/xmax check flags */
     150             :     uint8       checkflags;
     151             :     /* Page offset number for tuple */
     152             :     OffsetNumber offset;
     153             : } HeapTupleFreeze;
     154             : 
     155             : /*
     156             :  * State used by VACUUM to track the details of freezing all eligible tuples
     157             :  * on a given heap page.
     158             :  *
     159             :  * VACUUM prepares freeze plans for each page via heap_prepare_freeze_tuple
     160             :  * calls (every tuple with storage gets its own call).  This page-level freeze
     161             :  * state is updated across each call, which ultimately determines whether or
     162             :  * not freezing the page is required.
     163             :  *
     164             :  * Aside from the basic question of whether or not freezing will go ahead, the
     165             :  * state also tracks the oldest extant XID/MXID in the table as a whole, for
     166             :  * the purposes of advancing relfrozenxid/relminmxid values in pg_class later
     167             :  * on.  Each heap_prepare_freeze_tuple call pushes NewRelfrozenXid and/or
     168             :  * NewRelminMxid back as required to avoid unsafe final pg_class values.  Any
     169             :  * and all unfrozen XIDs or MXIDs that remain after VACUUM finishes _must_
     170             :  * have values >= the final relfrozenxid/relminmxid values in pg_class.  This
     171             :  * includes XIDs that remain as MultiXact members from any tuple's xmax.
     172             :  *
      173             :  * When the 'freeze_required' flag isn't set after all tuples are examined, the
     174             :  * final choice on freezing is made by vacuumlazy.c.  It can decide to trigger
     175             :  * freezing based on whatever criteria it deems appropriate.  However, it is
     176             :  * recommended that vacuumlazy.c avoid early freezing when freezing does not
     177             :  * enable setting the target page all-frozen in the visibility map afterwards.
     178             :  */
     179             : typedef struct HeapPageFreeze
     180             : {
     181             :     /* Is heap_prepare_freeze_tuple caller required to freeze page? */
     182             :     bool        freeze_required;
     183             : 
     184             :     /*
     185             :      * "Freeze" NewRelfrozenXid/NewRelminMxid trackers.
     186             :      *
      187             :  * Trackers used when heap_freeze_prepared_tuples freezes, or when there
     188             :      * are zero freeze plans for a page.  It is always valid for vacuumlazy.c
     189             :      * to freeze any page, by definition.  This even includes pages that have
     190             :      * no tuples with storage to consider in the first place.  That way the
     191             :      * 'totally_frozen' results from heap_prepare_freeze_tuple can always be
     192             :      * used in the same way, even when no freeze plans need to be executed to
     193             :      * "freeze the page".  Only the "freeze" path needs to consider the need
     194             :      * to set pages all-frozen in the visibility map under this scheme.
     195             :      *
     196             :      * When we freeze a page, we generally freeze all XIDs < OldestXmin, only
     197             :      * leaving behind XIDs that are ineligible for freezing, if any.  And so
     198             :      * you might wonder why these trackers are necessary at all; why should
     199             :      * _any_ page that VACUUM freezes _ever_ be left with XIDs/MXIDs that
     200             :      * ratchet back the top-level NewRelfrozenXid/NewRelminMxid trackers?
     201             :      *
     202             :      * It is useful to use a definition of "freeze the page" that does not
     203             :      * overspecify how MultiXacts are affected.  heap_prepare_freeze_tuple
     204             :      * generally prefers to remove Multis eagerly, but lazy processing is used
     205             :      * in cases where laziness allows VACUUM to avoid allocating a new Multi.
     206             :      * The "freeze the page" trackers enable this flexibility.
     207             :      */
     208             :     TransactionId FreezePageRelfrozenXid;
     209             :     MultiXactId FreezePageRelminMxid;
     210             : 
     211             :     /*
     212             :      * "No freeze" NewRelfrozenXid/NewRelminMxid trackers.
     213             :      *
     214             :      * These trackers are maintained in the same way as the trackers used when
     215             :      * VACUUM scans a page that isn't cleanup locked.  Both code paths are
     216             :      * based on the same general idea (do less work for this page during the
     217             :      * ongoing VACUUM, at the cost of having to accept older final values).
     218             :      */
     219             :     TransactionId NoFreezePageRelfrozenXid;
     220             :     MultiXactId NoFreezePageRelminMxid;
     221             : 
     222             : } HeapPageFreeze;
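/*
 * Illustrative sketch, not part of heapam.h: the trackers above only ever
 * ratchet backwards (towards older values).  A caller that leaves an
 * unfrozen XID/MXID behind on a page it will not freeze would maintain the
 * "no freeze" trackers roughly like this.  The helper name is hypothetical;
 * TransactionIdPrecedes()/MultiXactIdPrecedes() come from access/transam.h
 * and access/multixact.h.
 */
static inline void
ratchet_nofreeze_trackers(HeapPageFreeze *pagefrz,
                          TransactionId xid, MultiXactId mxid)
{
    if (TransactionIdIsNormal(xid) &&
        TransactionIdPrecedes(xid, pagefrz->NoFreezePageRelfrozenXid))
        pagefrz->NoFreezePageRelfrozenXid = xid;

    if (MultiXactIdIsValid(mxid) &&
        MultiXactIdPrecedes(mxid, pagefrz->NoFreezePageRelminMxid))
        pagefrz->NoFreezePageRelminMxid = mxid;
}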
     223             : 
     224             : 
     225             : /* 'reason' codes for heap_page_prune_and_freeze() */
     226             : typedef enum
     227             : {
     228             :     PRUNE_ON_ACCESS,            /* on-access pruning */
     229             :     PRUNE_VACUUM_SCAN,          /* VACUUM 1st heap pass */
     230             :     PRUNE_VACUUM_CLEANUP,       /* VACUUM 2nd heap pass */
     231             : } PruneReason;
     232             : 
     233             : /*
     234             :  * Input parameters to heap_page_prune_and_freeze()
     235             :  */
     236             : typedef struct PruneFreezeParams
     237             : {
     238             :     Relation    relation;       /* relation containing buffer to be pruned */
     239             :     Buffer      buffer;         /* buffer to be pruned */
     240             : 
     241             :     /*
      242             :      * The reason pruning was performed.  It determines the WAL record
      243             :      * opcode, which is useful for debugging and analysis purposes.
     244             :      */
     245             :     PruneReason reason;
     246             : 
     247             :     /*
     248             :      * Contains flag bits:
     249             :      *
     250             :      * HEAP_PAGE_PRUNE_MARK_UNUSED_NOW indicates that dead items can be set
     251             :      * LP_UNUSED during pruning.
     252             :      *
     253             :      * HEAP_PAGE_PRUNE_FREEZE indicates that we will also freeze tuples, and
     254             :      * will return 'all_visible', 'all_frozen' flags to the caller.
     255             :      */
     256             :     int         options;
     257             : 
     258             :     /*
     259             :      * vistest is used to distinguish whether tuples are DEAD or RECENTLY_DEAD
     260             :      * (see heap_prune_satisfies_vacuum).
     261             :      */
     262             :     GlobalVisState *vistest;
     263             : 
     264             :     /*
     265             :      * Contains the cutoffs used for freezing. They are required if the
     266             :      * HEAP_PAGE_PRUNE_FREEZE option is set. cutoffs->OldestXmin is also used
     267             :      * to determine if dead tuples are HEAPTUPLE_RECENTLY_DEAD or
      268             :      * HEAPTUPLE_DEAD. Currently only VACUUM passes in cutoffs; it
      269             :      * calculates them once, at the beginning of vacuuming the relation.
     270             :      */
     271             :     struct VacuumCutoffs *cutoffs;
     272             : } PruneFreezeParams;
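/*
 * Illustrative sketch, not part of heapam.h: filling in the parameter struct
 * for a VACUUM-style pruning call.  'vistest' and 'cutoffs' are assumed to
 * have been obtained earlier (e.g. via GlobalVisTestFor() and
 * vacuum_get_cutoffs()); the helper name is hypothetical.  A caller whose
 * table has no indexes might also OR HEAP_PAGE_PRUNE_MARK_UNUSED_NOW into
 * 'options' so that dead items are marked LP_UNUSED immediately.
 */
static void
init_prune_params(PruneFreezeParams *params, Relation rel, Buffer buf,
                  GlobalVisState *vistest, struct VacuumCutoffs *cutoffs)
{
    params->relation = rel;
    params->buffer = buf;
    params->reason = PRUNE_VACUUM_SCAN;
    params->options = HEAP_PAGE_PRUNE_FREEZE;
    params->vistest = vistest;
    params->cutoffs = cutoffs;
}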
     273             : 
     274             : /*
     275             :  * Per-page state returned by heap_page_prune_and_freeze()
     276             :  */
     277             : typedef struct PruneFreezeResult
     278             : {
     279             :     int         ndeleted;       /* Number of tuples deleted from the page */
     280             :     int         nnewlpdead;     /* Number of newly LP_DEAD items */
     281             :     int         nfrozen;        /* Number of tuples we froze */
     282             : 
     283             :     /* Number of live and recently dead tuples on the page, after pruning */
     284             :     int         live_tuples;
     285             :     int         recently_dead_tuples;
     286             : 
     287             :     /*
     288             :      * all_visible and all_frozen indicate if the all-visible and all-frozen
     289             :      * bits in the visibility map can be set for this page, after pruning.
     290             :      *
     291             :      * vm_conflict_horizon is the newest xmin of live tuples on the page.  The
     292             :      * caller can use it as the conflict horizon when setting the VM bits.  It
     293             :      * is only valid if we froze some tuples (nfrozen > 0), and all_frozen is
     294             :      * true.
     295             :      *
      296             :      * These are only set if the HEAP_PAGE_PRUNE_FREEZE option is set.
     297             :      */
     298             :     bool        all_visible;
     299             :     bool        all_frozen;
     300             :     TransactionId vm_conflict_horizon;
     301             : 
     302             :     /*
      303             :      * Whether or not the page makes rel truncation unsafe.  This is not set
      304             :      * to 'true' merely because the page contains LP_DEAD items, since VACUUM
      305             :      * will remove them before attempting to truncate.
     306             :      */
     307             :     bool        hastup;
     308             : 
     309             :     /*
     310             :      * LP_DEAD items on the page after pruning.  Includes existing LP_DEAD
     311             :      * items.
     312             :      */
     313             :     int         lpdead_items;
     314             :     OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
     315             : } PruneFreezeResult;
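/*
 * Illustrative sketch, not part of heapam.h: a pruning call and how the
 * result might be consumed.  heap_page_prune_and_freeze() is declared later
 * in this header; the caller is assumed to hold a cleanup lock on
 * params->buffer, and the helper name and tracker pointers are hypothetical.
 */
static void
prune_one_page(PruneFreezeParams *params,
               TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid)
{
    PruneFreezeResult presult;
    OffsetNumber curoff = InvalidOffsetNumber;  /* error-reporting cursor */

    heap_page_prune_and_freeze(params, &presult, &curoff,
                               new_relfrozen_xid, new_relmin_mxid);

    if (presult.lpdead_items > 0)
    {
        /* remember presult.deadoffsets[] for index vacuuming later on */
    }

    if (presult.all_visible && presult.all_frozen)
    {
        /* the page can be marked all-frozen in the visibility map */
    }
}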
     316             : 
     317             : 
     318             : /* ----------------
     319             :  *      function prototypes for heap access method
     320             :  *
     321             :  * heap_create, heap_create_with_catalog, and heap_drop_with_catalog
     322             :  * are declared in catalog/heap.h
     323             :  * ----------------
     324             :  */
     325             : 
     326             : 
     327             : extern TableScanDesc heap_beginscan(Relation relation, Snapshot snapshot,
     328             :                                     int nkeys, ScanKey key,
     329             :                                     ParallelTableScanDesc parallel_scan,
     330             :                                     uint32 flags);
     331             : extern void heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk,
     332             :                                BlockNumber numBlks);
     333             : extern void heap_prepare_pagescan(TableScanDesc sscan);
     334             : extern void heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
     335             :                         bool allow_strat, bool allow_sync, bool allow_pagemode);
     336             : extern void heap_endscan(TableScanDesc sscan);
     337             : extern HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction);
     338             : extern bool heap_getnextslot(TableScanDesc sscan,
     339             :                              ScanDirection direction, TupleTableSlot *slot);
     340             : extern void heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
     341             :                               ItemPointer maxtid);
     342             : extern bool heap_getnextslot_tidrange(TableScanDesc sscan,
     343             :                                       ScanDirection direction,
     344             :                                       TupleTableSlot *slot);
     345             : extern bool heap_fetch(Relation relation, Snapshot snapshot,
     346             :                        HeapTuple tuple, Buffer *userbuf, bool keep_buf);
     347             : extern bool heap_hot_search_buffer(ItemPointer tid, Relation relation,
     348             :                                    Buffer buffer, Snapshot snapshot, HeapTuple heapTuple,
     349             :                                    bool *all_dead, bool first_call);
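/*
 * Illustrative sketch, not part of heapam.h: fetching one tuple by TID with
 * heap_fetch().  On success the tuple's data points into the returned pinned
 * buffer, which the caller must release.  The helper name is hypothetical;
 * ReleaseBuffer() comes from storage/bufmgr.h.
 */
static bool
fetch_by_tid(Relation rel, Snapshot snapshot, ItemPointer tid)
{
    HeapTupleData tuple;
    Buffer      buf;

    tuple.t_self = *tid;        /* heap_fetch reads the target TID from here */

    if (!heap_fetch(rel, snapshot, &tuple, &buf, false))
        return false;           /* missing or invisible; no buffer pin held */

    /* ... examine tuple.t_data while the pin on 'buf' is held ... */

    ReleaseBuffer(buf);
    return true;
}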
     350             : 
     351             : extern void heap_get_latest_tid(TableScanDesc sscan, ItemPointer tid);
     352             : 
     353             : extern BulkInsertState GetBulkInsertState(void);
     354             : extern void FreeBulkInsertState(BulkInsertState);
     355             : extern void ReleaseBulkInsertStatePin(BulkInsertState bistate);
     356             : 
     357             : extern void heap_insert(Relation relation, HeapTuple tup, CommandId cid,
     358             :                         int options, BulkInsertState bistate);
     359             : extern void heap_multi_insert(Relation relation, TupleTableSlot **slots,
     360             :                               int ntuples, CommandId cid, int options,
     361             :                               BulkInsertState bistate);
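/*
 * Illustrative sketch, not part of heapam.h: the BulkInsertState lifecycle
 * around repeated inserts, roughly as a COPY-like code path would use it.
 * Tuple construction and error handling are elided; the helper name and the
 * 'tuples'/'ntuples' arguments are hypothetical, and GetCurrentCommandId()
 * comes from access/xact.h.
 */
static void
bulk_load(Relation rel, HeapTuple *tuples, int ntuples)
{
    BulkInsertState bistate = GetBulkInsertState();
    CommandId   cid = GetCurrentCommandId(true);

    for (int i = 0; i < ntuples; i++)
        heap_insert(rel, tuples[i], cid, HEAP_INSERT_SKIP_FSM, bistate);

    FreeBulkInsertState(bistate);
}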
     362             : extern TM_Result heap_delete(Relation relation, const ItemPointerData *tid,
     363             :                              CommandId cid, Snapshot crosscheck, bool wait,
     364             :                              TM_FailureData *tmfd, bool changingPart);
     365             : extern void heap_finish_speculative(Relation relation, const ItemPointerData *tid);
     366             : extern void heap_abort_speculative(Relation relation, const ItemPointerData *tid);
     367             : extern TM_Result heap_update(Relation relation, const ItemPointerData *otid,
     368             :                              HeapTuple newtup,
     369             :                              CommandId cid, Snapshot crosscheck, bool wait,
     370             :                              TM_FailureData *tmfd, LockTupleMode *lockmode,
     371             :                              TU_UpdateIndexes *update_indexes);
     372             : extern TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
     373             :                                  CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
     374             :                                  bool follow_updates,
     375             :                                  Buffer *buffer, TM_FailureData *tmfd);
     376             : 
     377             : extern bool heap_inplace_lock(Relation relation,
     378             :                               HeapTuple oldtup_ptr, Buffer buffer,
     379             :                               void (*release_callback) (void *), void *arg);
     380             : extern void heap_inplace_update_and_unlock(Relation relation,
     381             :                                            HeapTuple oldtup, HeapTuple tuple,
     382             :                                            Buffer buffer);
     383             : extern void heap_inplace_unlock(Relation relation,
     384             :                                 HeapTuple oldtup, Buffer buffer);
     385             : extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
     386             :                                       const struct VacuumCutoffs *cutoffs,
     387             :                                       HeapPageFreeze *pagefrz,
     388             :                                       HeapTupleFreeze *frz, bool *totally_frozen);
     389             : 
     390             : extern void heap_pre_freeze_checks(Buffer buffer,
     391             :                                    HeapTupleFreeze *tuples, int ntuples);
     392             : extern void heap_freeze_prepared_tuples(Buffer buffer,
     393             :                                         HeapTupleFreeze *tuples, int ntuples);
     394             : extern bool heap_freeze_tuple(HeapTupleHeader tuple,
     395             :                               TransactionId relfrozenxid, TransactionId relminmxid,
     396             :                               TransactionId FreezeLimit, TransactionId MultiXactCutoff);
     397             : extern bool heap_tuple_should_freeze(HeapTupleHeader tuple,
     398             :                                      const struct VacuumCutoffs *cutoffs,
     399             :                                      TransactionId *NoFreezePageRelfrozenXid,
     400             :                                      MultiXactId *NoFreezePageRelminMxid);
     401             : extern bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple);
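/*
 * Illustrative sketch, not part of heapam.h: the two-phase freeze protocol
 * exposed by the functions above.  A freeze plan is prepared per tuple while
 * the page-level HeapPageFreeze state is updated; prepared plans are then
 * sanity-checked against pg_xact and applied.  Real callers batch all plans
 * for a page, execute them inside a critical section, and WAL-log the change
 * (see log_heap_prune_and_freeze below); all of that is elided here.  The
 * helper name is hypothetical, and the caller is assumed to hold an
 * exclusive lock on 'buf'.
 */
static void
freeze_tuple_if_required(Buffer buf, HeapTupleHeader htup, OffsetNumber offnum,
                         const struct VacuumCutoffs *cutoffs,
                         HeapPageFreeze *pagefrz)
{
    HeapTupleFreeze frz;
    bool        totally_frozen;

    /* Phase 1: decide whether this tuple needs freezing and build its plan */
    if (heap_prepare_freeze_tuple(htup, cutoffs, pagefrz, &frz, &totally_frozen))
    {
        frz.offset = offnum;    /* record which item the plan applies to */

        /* Phase 2: sanity-check the plan against pg_xact, then apply it */
        heap_pre_freeze_checks(buf, &frz, 1);
        heap_freeze_prepared_tuples(buf, &frz, 1);
    }
}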
     402             : 
     403             : extern void simple_heap_insert(Relation relation, HeapTuple tup);
     404             : extern void simple_heap_delete(Relation relation, const ItemPointerData *tid);
     405             : extern void simple_heap_update(Relation relation, const ItemPointerData *otid,
     406             :                                HeapTuple tup, TU_UpdateIndexes *update_indexes);
     407             : 
     408             : extern TransactionId heap_index_delete_tuples(Relation rel,
     409             :                                               TM_IndexDeleteOp *delstate);
     410             : 
     411             : /* in heap/pruneheap.c */
     412             : extern void heap_page_prune_opt(Relation relation, Buffer buffer);
     413             : extern void heap_page_prune_and_freeze(PruneFreezeParams *params,
     414             :                                        PruneFreezeResult *presult,
     415             :                                        OffsetNumber *off_loc,
     416             :                                        TransactionId *new_relfrozen_xid,
     417             :                                        MultiXactId *new_relmin_mxid);
     418             : extern void heap_page_prune_execute(Buffer buffer, bool lp_truncate_only,
     419             :                                     OffsetNumber *redirected, int nredirected,
     420             :                                     OffsetNumber *nowdead, int ndead,
     421             :                                     OffsetNumber *nowunused, int nunused);
     422             : extern void heap_get_root_tuples(Page page, OffsetNumber *root_offsets);
     423             : extern void log_heap_prune_and_freeze(Relation relation, Buffer buffer,
     424             :                                       Buffer vmbuffer, uint8 vmflags,
     425             :                                       TransactionId conflict_xid,
     426             :                                       bool cleanup_lock,
     427             :                                       PruneReason reason,
     428             :                                       HeapTupleFreeze *frozen, int nfrozen,
     429             :                                       OffsetNumber *redirected, int nredirected,
     430             :                                       OffsetNumber *dead, int ndead,
     431             :                                       OffsetNumber *unused, int nunused);
     432             : 
     433             : /* in heap/vacuumlazy.c */
     434             : extern void heap_vacuum_rel(Relation rel,
     435             :                             const VacuumParams params, BufferAccessStrategy bstrategy);
     436             : 
     437             : /* in heap/heapam_visibility.c */
     438             : extern bool HeapTupleSatisfiesVisibility(HeapTuple htup, Snapshot snapshot,
     439             :                                          Buffer buffer);
     440             : extern TM_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid,
     441             :                                           Buffer buffer);
     442             : extern HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin,
     443             :                                             Buffer buffer);
     444             : extern HTSV_Result HeapTupleSatisfiesVacuumHorizon(HeapTuple htup, Buffer buffer,
     445             :                                                    TransactionId *dead_after);
     446             : extern void HeapTupleSetHintBits(HeapTupleHeader tuple, Buffer buffer,
     447             :                                  uint16 infomask, TransactionId xid);
     448             : extern bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple);
     449             : extern bool HeapTupleIsSurelyDead(HeapTuple htup,
     450             :                                   GlobalVisState *vistest);
     451             : 
     452             : /*
     453             :  * To avoid leaking too much knowledge about reorderbuffer implementation
      454             :  * details, this is implemented in reorderbuffer.c, not heapam_visibility.c
     455             :  */
     456             : struct HTAB;
     457             : extern bool ResolveCminCmaxDuringDecoding(struct HTAB *tuplecid_data,
     458             :                                           Snapshot snapshot,
     459             :                                           HeapTuple htup,
     460             :                                           Buffer buffer,
     461             :                                           CommandId *cmin, CommandId *cmax);
     462             : extern void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple,
     463             :                                                 Buffer buffer, Snapshot snapshot);
     464             : 
     465             : /*
     466             :  * heap_execute_freeze_tuple
     467             :  *      Execute the prepared freezing of a tuple with caller's freeze plan.
     468             :  *
     469             :  * Caller is responsible for ensuring that no other backend can access the
     470             :  * storage underlying this tuple, either by holding an exclusive lock on the
     471             :  * buffer containing it (which is what lazy VACUUM does), or by having it be
     472             :  * in private storage (which is what CLUSTER and friends do).
     473             :  */
     474             : static inline void
     475     2515242 : heap_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz)
     476             : {
     477     2515242 :     HeapTupleHeaderSetXmax(tuple, frz->xmax);
     478             : 
     479     2515242 :     if (frz->frzflags & XLH_FREEZE_XVAC)
     480           0 :         HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
     481             : 
     482     2515242 :     if (frz->frzflags & XLH_INVALID_XVAC)
     483           0 :         HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
     484             : 
     485     2515242 :     tuple->t_infomask = frz->t_infomask;
     486     2515242 :     tuple->t_infomask2 = frz->t_infomask2;
     487     2515242 : }
     488             : 
     489             : #endif                          /* HEAPAM_H */

Generated by: LCOV version 1.16