LCOV - code coverage report
Current view: top level - src/include/access - heapam.h (source / functions) Coverage Total Hit
Test: PostgreSQL 19devel Lines: 77.8 % 9 7
Test Date: 2026-04-07 14:16:30 Functions: 100.0 % 1 1
Legend: Lines:     hit not hit

            Line data    Source code
       1              : /*-------------------------------------------------------------------------
       2              :  *
       3              :  * heapam.h
       4              :  *    POSTGRES heap access method definitions.
       5              :  *
       6              :  *
       7              :  * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
       8              :  * Portions Copyright (c) 1994, Regents of the University of California
       9              :  *
      10              :  * src/include/access/heapam.h
      11              :  *
      12              :  *-------------------------------------------------------------------------
      13              :  */
      14              : #ifndef HEAPAM_H
      15              : #define HEAPAM_H
      16              : 
      17              : #include "access/heapam_xlog.h"
      18              : #include "access/relation.h"  /* for backward compatibility */
      19              : #include "access/relscan.h"
      20              : #include "access/sdir.h"
      21              : #include "access/skey.h"
      22              : #include "access/table.h"     /* for backward compatibility */
      23              : #include "access/tableam.h"
      24              : #include "nodes/lockoptions.h"
      25              : #include "nodes/primnodes.h"
      26              : #include "storage/bufpage.h"
      27              : #include "storage/dsm.h"
      28              : #include "storage/lockdefs.h"
      29              : #include "storage/read_stream.h"
      30              : #include "storage/shm_toc.h"
      31              : #include "utils/relcache.h"
      32              : #include "utils/snapshot.h"
      33              : 
      34              : 
      35              : /* "options" flag bits for heap_insert */
      36              : #define HEAP_INSERT_SKIP_FSM    TABLE_INSERT_SKIP_FSM
      37              : #define HEAP_INSERT_FROZEN      TABLE_INSERT_FROZEN
      38              : #define HEAP_INSERT_NO_LOGICAL  TABLE_INSERT_NO_LOGICAL
      39              : #define HEAP_INSERT_SPECULATIVE 0x0010
      40              : 
      41              : /* "options" flag bits for heap_page_prune_and_freeze */
      42              : #define HEAP_PAGE_PRUNE_MARK_UNUSED_NOW     (1 << 0)
      43              : #define HEAP_PAGE_PRUNE_FREEZE              (1 << 1)
      44              : #define HEAP_PAGE_PRUNE_ALLOW_FAST_PATH     (1 << 2)
      45              : #define HEAP_PAGE_PRUNE_SET_VM              (1 << 3)
      46              : 
      47              : typedef struct BulkInsertStateData *BulkInsertState;
      48              : typedef struct GlobalVisState GlobalVisState;
      49              : typedef struct TupleTableSlot TupleTableSlot;
      50              : typedef struct VacuumCutoffs VacuumCutoffs;
      51              : typedef struct VacuumParams VacuumParams;
      52              : 
      53              : #define MaxLockTupleMode    LockTupleExclusive
      54              : 
      55              : /*
      56              :  * Descriptor for heap table scans.
      57              :  */
      58              : typedef struct HeapScanDescData
      59              : {
      60              :     TableScanDescData rs_base;  /* AM independent part of the descriptor */
      61              : 
      62              :     /* state set up at initscan time */
      63              :     BlockNumber rs_nblocks;     /* total number of blocks in rel */
      64              :     BlockNumber rs_startblock;  /* block # to start at */
      65              :     BlockNumber rs_numblocks;   /* max number of blocks to scan */
      66              :     /* rs_numblocks is usually InvalidBlockNumber, meaning "scan whole rel" */
      67              : 
      68              :     /* scan current state */
      69              :     bool        rs_inited;      /* false = scan not init'd yet */
      70              :     OffsetNumber rs_coffset;    /* current offset # in non-page-at-a-time mode */
      71              :     BlockNumber rs_cblock;      /* current block # in scan, if any */
      72              :     Buffer      rs_cbuf;        /* current buffer in scan, if any */
      73              :     /* NB: if rs_cbuf is not InvalidBuffer, we hold a pin on that buffer */
      74              : 
      75              :     BufferAccessStrategy rs_strategy;   /* access strategy for reads */
      76              : 
      77              :     HeapTupleData rs_ctup;      /* current tuple in scan, if any */
      78              : 
      79              :     /* For scans that stream reads */
      80              :     ReadStream *rs_read_stream;
      81              : 
      82              :     /*
      83              :      * For sequential scans and TID range scans to stream reads. The read
      84              :      * stream is allocated at the beginning of the scan and reset on rescan or
      85              :      * when the scan direction changes. The scan direction is saved each time
      86              :      * a new page is requested. If the scan direction changes from one page to
      87              :      * the next, the read stream releases all previously pinned buffers and
      88              :      * resets the prefetch block.
      89              :      */
      90              :     ScanDirection rs_dir;
      91              :     BlockNumber rs_prefetch_block;
      92              : 
      93              :     /*
      94              :      * For parallel scans to store page allocation data.  NULL when not
      95              :      * performing a parallel scan.
      96              :      */
      97              :     ParallelBlockTableScanWorkerData *rs_parallelworkerdata;
      98              : 
      99              :     /* Current heap block's corresponding page in the visibility map */
     100              :     Buffer      rs_vmbuffer;
     101              : 
     102              :     /* these fields only used in page-at-a-time mode and for bitmap scans */
     103              :     uint32      rs_cindex;      /* current tuple's index in vistuples */
     104              :     uint32      rs_ntuples;     /* number of visible tuples on page */
     105              :     OffsetNumber rs_vistuples[MaxHeapTuplesPerPage];    /* their offsets */
     106              : } HeapScanDescData;
     107              : typedef struct HeapScanDescData *HeapScanDesc;
     108              : 
     109              : typedef struct BitmapHeapScanDescData
     110              : {
     111              :     HeapScanDescData rs_heap_base;
     112              : 
     113              :     /* Holds no data */
     114              : } BitmapHeapScanDescData;
     115              : typedef struct BitmapHeapScanDescData *BitmapHeapScanDesc;
     116              : 
     117              : /*
     118              :  * Descriptor for fetches from heap via an index.
     119              :  */
     120              : typedef struct IndexFetchHeapData
     121              : {
     122              :     IndexFetchTableData xs_base;    /* AM independent part of the descriptor */
     123              : 
     124              :     /*
     125              :      * Current heap buffer in scan (and its block number), if any.  NB: if
     126              :      * xs_blk is not InvalidBlockNumber, we hold a pin in xs_cbuf.
     127              :      */
     128              :     Buffer      xs_cbuf;
     129              :     BlockNumber xs_blk;
     130              : 
     131              :     /* Current heap block's corresponding page in the visibility map */
     132              :     Buffer      xs_vmbuffer;
     133              : } IndexFetchHeapData;
     134              : 
     135              : /* Result codes for HeapTupleSatisfiesVacuum */
     136              : typedef enum
     137              : {
     138              :     HEAPTUPLE_DEAD,             /* tuple is dead and deletable */
     139              :     HEAPTUPLE_LIVE,             /* tuple is live (committed, no deleter) */
     140              :     HEAPTUPLE_RECENTLY_DEAD,    /* tuple is dead, but not deletable yet */
     141              :     HEAPTUPLE_INSERT_IN_PROGRESS,   /* inserting xact is still in progress */
     142              :     HEAPTUPLE_DELETE_IN_PROGRESS,   /* deleting xact is still in progress */
     143              : } HTSV_Result;
     144              : 
     145              : /*
     146              :  * heap_prepare_freeze_tuple may request that heap_freeze_execute_prepared
     147              :  * check any tuple's to-be-frozen xmin and/or xmax status using pg_xact
     148              :  */
     149              : #define     HEAP_FREEZE_CHECK_XMIN_COMMITTED    0x01
     150              : #define     HEAP_FREEZE_CHECK_XMAX_ABORTED      0x02
     151              : 
     152              : /* heap_prepare_freeze_tuple state describing how to freeze a tuple */
     153              : typedef struct HeapTupleFreeze
     154              : {
     155              :     /* Fields describing how to process tuple */
     156              :     TransactionId xmax;
     157              :     uint16      t_infomask2;
     158              :     uint16      t_infomask;
     159              :     uint8       frzflags;
     160              : 
     161              :     /* xmin/xmax check flags */
     162              :     uint8       checkflags;
     163              :     /* Page offset number for tuple */
     164              :     OffsetNumber offset;
     165              : } HeapTupleFreeze;
     166              : 
     167              : /*
     168              :  * State used by VACUUM to track the details of freezing all eligible tuples
     169              :  * on a given heap page.
     170              :  *
     171              :  * VACUUM prepares freeze plans for each page via heap_prepare_freeze_tuple
     172              :  * calls (every tuple with storage gets its own call).  This page-level freeze
     173              :  * state is updated across each call, which ultimately determines whether or
     174              :  * not freezing the page is required.
     175              :  *
     176              :  * Aside from the basic question of whether or not freezing will go ahead, the
     177              :  * state also tracks the oldest extant XID/MXID in the table as a whole, for
     178              :  * the purposes of advancing relfrozenxid/relminmxid values in pg_class later
     179              :  * on.  Each heap_prepare_freeze_tuple call pushes NewRelfrozenXid and/or
     180              :  * NewRelminMxid back as required to avoid unsafe final pg_class values.  Any
     181              :  * and all unfrozen XIDs or MXIDs that remain after VACUUM finishes _must_
     182              :  * have values >= the final relfrozenxid/relminmxid values in pg_class.  This
     183              :  * includes XIDs that remain as MultiXact members from any tuple's xmax.
     184              :  *
     185              :  * When 'freeze_required' flag isn't set after all tuples are examined, the
     186              :  * final choice on freezing is made by vacuumlazy.c.  It can decide to trigger
     187              :  * freezing based on whatever criteria it deems appropriate.  However, it is
     188              :  * recommended that vacuumlazy.c avoid early freezing when freezing does not
     189              :  * enable setting the target page all-frozen in the visibility map afterwards.
     190              :  */
     191              : typedef struct HeapPageFreeze
     192              : {
     193              :     /* Is heap_prepare_freeze_tuple caller required to freeze page? */
     194              :     bool        freeze_required;
     195              : 
     196              :     /*
     197              :      * "Freeze" NewRelfrozenXid/NewRelminMxid trackers.
     198              :      *
     199              :      * Trackers used when heap_freeze_execute_prepared freezes, or when there
     200              :      * are zero freeze plans for a page.  It is always valid for vacuumlazy.c
     201              :      * to freeze any page, by definition.  This even includes pages that have
     202              :      * no tuples with storage to consider in the first place.  That way the
     203              :      * 'totally_frozen' results from heap_prepare_freeze_tuple can always be
     204              :      * used in the same way, even when no freeze plans need to be executed to
     205              :      * "freeze the page".  Only the "freeze" path needs to consider the need
     206              :      * to set pages all-frozen in the visibility map under this scheme.
     207              :      *
     208              :      * When we freeze a page, we generally freeze all XIDs < OldestXmin, only
     209              :      * leaving behind XIDs that are ineligible for freezing, if any.  And so
     210              :      * you might wonder why these trackers are necessary at all; why should
     211              :      * _any_ page that VACUUM freezes _ever_ be left with XIDs/MXIDs that
     212              :      * ratchet back the top-level NewRelfrozenXid/NewRelminMxid trackers?
     213              :      *
     214              :      * It is useful to use a definition of "freeze the page" that does not
     215              :      * overspecify how MultiXacts are affected.  heap_prepare_freeze_tuple
     216              :      * generally prefers to remove Multis eagerly, but lazy processing is used
     217              :      * in cases where laziness allows VACUUM to avoid allocating a new Multi.
     218              :      * The "freeze the page" trackers enable this flexibility.
     219              :      */
     220              :     TransactionId FreezePageRelfrozenXid;
     221              :     MultiXactId FreezePageRelminMxid;
     222              : 
     223              :     /*
     224              :      * Newest XID that this page's freeze actions will remove from tuple
     225              :      * visibility metadata (currently xmin and/or xvac). It is used to derive
     226              :      * the snapshot conflict horizon for a WAL record that freezes tuples. On
     227              :      * a standby, we must not replay that change while any snapshot could
     228              :      * still treat that XID as running.
     229              :      *
     230              :      * It's only used if we execute freeze plans for this page, so there is no
     231              :      * corresponding "no freeze" tracker.
     232              :      */
     233              :     TransactionId FreezePageConflictXid;
     234              : 
     235              :     /*
     236              :      * "No freeze" NewRelfrozenXid/NewRelminMxid trackers.
     237              :      *
     238              :      * These trackers are maintained in the same way as the trackers used when
     239              :      * VACUUM scans a page that isn't cleanup locked.  Both code paths are
     240              :      * based on the same general idea (do less work for this page during the
     241              :      * ongoing VACUUM, at the cost of having to accept older final values).
     242              :      */
     243              :     TransactionId NoFreezePageRelfrozenXid;
     244              :     MultiXactId NoFreezePageRelminMxid;
     245              : 
     246              : } HeapPageFreeze;
     247              : 
     248              : 
     249              : /* 'reason' codes for heap_page_prune_and_freeze() */
     250              : typedef enum
     251              : {
     252              :     PRUNE_ON_ACCESS,            /* on-access pruning */
     253              :     PRUNE_VACUUM_SCAN,          /* VACUUM 1st heap pass */
     254              :     PRUNE_VACUUM_CLEANUP,       /* VACUUM 2nd heap pass */
     255              : } PruneReason;
     256              : 
     257              : /*
     258              :  * Input parameters to heap_page_prune_and_freeze()
     259              :  */
     260              : typedef struct PruneFreezeParams
     261              : {
     262              :     Relation    relation;       /* relation containing buffer to be pruned */
     263              :     Buffer      buffer;         /* buffer to be pruned */
     264              : 
     265              :     /*
     266              :      * Callers should provide a pinned vmbuffer corresponding to the heap
     267              :      * block in buffer. We will check for and repair any corruption in the VM
     268              :      * and set the VM after pruning if the page is all-visible/all-frozen.
     269              :      */
     270              :     Buffer      vmbuffer;
     271              : 
     272              :     /*
     273              :      * The reason pruning was performed.  It is used to set the WAL record
     274              :      * opcode which is used for debugging and analysis purposes.
     275              :      */
     276              :     PruneReason reason;
     277              : 
     278              :     /*
     279              :      * Contains flag bits:
     280              :      *
     281              :      * HEAP_PAGE_PRUNE_MARK_UNUSED_NOW indicates that dead items can be set
     282              :      * LP_UNUSED during pruning.
     283              :      *
     284              :      * HEAP_PAGE_PRUNE_FREEZE indicates that we will also freeze tuples.
     285              :      */
     286              :     int         options;
     287              : 
     288              :     /*
     289              :      * vistest is used to distinguish whether tuples are DEAD or RECENTLY_DEAD
     290              :      * (see heap_prune_satisfies_vacuum).
     291              :      */
     292              :     GlobalVisState *vistest;
     293              : 
     294              :     /*
     295              :      * Contains the cutoffs used for freezing. They are required if the
     296              :      * HEAP_PAGE_PRUNE_FREEZE option is set. cutoffs->OldestXmin is also used
     297              :      * to determine if dead tuples are HEAPTUPLE_RECENTLY_DEAD or
     298              :      * HEAPTUPLE_DEAD. Currently only vacuum passes in cutoffs. Vacuum
     299              :      * calculates them once, at the beginning of vacuuming the relation.
     300              :      */
     301              :     VacuumCutoffs *cutoffs;
     302              : } PruneFreezeParams;
     303              : 
     304              : /*
     305              :  * Per-page state returned by heap_page_prune_and_freeze()
     306              :  */
     307              : typedef struct PruneFreezeResult
     308              : {
     309              :     int         ndeleted;       /* Number of tuples deleted from the page */
     310              :     int         nnewlpdead;     /* Number of newly LP_DEAD items */
     311              :     int         nfrozen;        /* Number of tuples we froze */
     312              : 
     313              :     /* Number of live and recently dead tuples on the page, after pruning */
     314              :     int         live_tuples;
     315              :     int         recently_dead_tuples;
     316              : 
     317              :     /*
     318              :      * Whether or not the page was newly set all-visible and all-frozen during
     319              :      * phase I of vacuuming.
     320              :      */
     321              :     bool        newly_all_visible;
     322              :     bool        newly_all_visible_frozen;
     323              :     bool        newly_all_frozen;
     324              : 
     325              :     /*
     326              :      * Whether or not the page makes rel truncation unsafe.  This is set to
     327              :      * 'true', even if the page contains LP_DEAD items.  VACUUM will remove
     328              :      * them before attempting to truncate.
     329              :      */
     330              :     bool        hastup;
     331              : 
     332              :     /*
     333              :      * LP_DEAD items on the page after pruning.  Includes existing LP_DEAD
     334              :      * items.
     335              :      */
     336              :     int         lpdead_items;
     337              :     OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
     338              : } PruneFreezeResult;
     339              : 
     340              : 
     341              : /* ----------------
     342              :  *      function prototypes for heap access method
     343              :  *
     344              :  * heap_create, heap_create_with_catalog, and heap_drop_with_catalog
     345              :  * are declared in catalog/heap.h
     346              :  * ----------------
     347              :  */
     348              : 
     349              : 
     350              : extern TableScanDesc heap_beginscan(Relation relation, Snapshot snapshot,
     351              :                                     int nkeys, ScanKey key,
     352              :                                     ParallelTableScanDesc parallel_scan,
     353              :                                     uint32 flags);
     354              : extern void heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk,
     355              :                                BlockNumber numBlks);
     356              : extern void heap_prepare_pagescan(TableScanDesc sscan);
     357              : extern void heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
     358              :                         bool allow_strat, bool allow_sync, bool allow_pagemode);
     359              : extern void heap_endscan(TableScanDesc sscan);
     360              : extern HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction);
     361              : extern bool heap_getnextslot(TableScanDesc sscan,
     362              :                              ScanDirection direction, TupleTableSlot *slot);
     363              : extern void heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
     364              :                               ItemPointer maxtid);
     365              : extern bool heap_getnextslot_tidrange(TableScanDesc sscan,
     366              :                                       ScanDirection direction,
     367              :                                       TupleTableSlot *slot);
     368              : extern bool heap_fetch(Relation relation, Snapshot snapshot,
     369              :                        HeapTuple tuple, Buffer *userbuf, bool keep_buf);
     370              : 
     371              : extern void heap_get_latest_tid(TableScanDesc sscan, ItemPointer tid);
     372              : 
     373              : extern BulkInsertState GetBulkInsertState(void);
     374              : extern void FreeBulkInsertState(BulkInsertState);
     375              : extern void ReleaseBulkInsertStatePin(BulkInsertState bistate);
     376              : 
     377              : extern void heap_insert(Relation relation, HeapTuple tup, CommandId cid,
     378              :                         uint32 options, BulkInsertState bistate);
     379              : extern void heap_multi_insert(Relation relation, TupleTableSlot **slots,
     380              :                               int ntuples, CommandId cid, uint32 options,
     381              :                               BulkInsertState bistate);
     382              : extern TM_Result heap_delete(Relation relation, const ItemPointerData *tid,
     383              :                              CommandId cid, uint32 options, Snapshot crosscheck,
     384              :                              bool wait, TM_FailureData *tmfd);
     385              : extern void heap_finish_speculative(Relation relation, const ItemPointerData *tid);
     386              : extern void heap_abort_speculative(Relation relation, const ItemPointerData *tid);
     387              : extern TM_Result heap_update(Relation relation, const ItemPointerData *otid,
     388              :                              HeapTuple newtup,
     389              :                              CommandId cid, uint32 options,
     390              :                              Snapshot crosscheck, bool wait,
     391              :                              TM_FailureData *tmfd, LockTupleMode *lockmode,
     392              :                              TU_UpdateIndexes *update_indexes);
     393              : extern TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
     394              :                                  CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
     395              :                                  bool follow_updates,
     396              :                                  Buffer *buffer, TM_FailureData *tmfd);
     397              : 
     398              : extern bool heap_inplace_lock(Relation relation,
     399              :                               HeapTuple oldtup_ptr, Buffer buffer,
     400              :                               void (*release_callback) (void *), void *arg);
     401              : extern void heap_inplace_update_and_unlock(Relation relation,
     402              :                                            HeapTuple oldtup, HeapTuple tuple,
     403              :                                            Buffer buffer);
     404              : extern void heap_inplace_unlock(Relation relation,
     405              :                                 HeapTuple oldtup, Buffer buffer);
     406              : extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
     407              :                                       const VacuumCutoffs *cutoffs,
     408              :                                       HeapPageFreeze *pagefrz,
     409              :                                       HeapTupleFreeze *frz, bool *totally_frozen);
     410              : 
     411              : extern void heap_pre_freeze_checks(Buffer buffer,
     412              :                                    HeapTupleFreeze *tuples, int ntuples);
     413              : extern void heap_freeze_prepared_tuples(Buffer buffer,
     414              :                                         HeapTupleFreeze *tuples, int ntuples);
     415              : extern bool heap_freeze_tuple(HeapTupleHeader tuple,
     416              :                               TransactionId relfrozenxid, TransactionId relminmxid,
     417              :                               TransactionId FreezeLimit, TransactionId MultiXactCutoff);
     418              : extern bool heap_tuple_should_freeze(HeapTupleHeader tuple,
     419              :                                      const VacuumCutoffs *cutoffs,
     420              :                                      TransactionId *NoFreezePageRelfrozenXid,
     421              :                                      MultiXactId *NoFreezePageRelminMxid);
     422              : extern bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple);
     423              : 
     424              : extern void simple_heap_insert(Relation relation, HeapTuple tup);
     425              : extern void simple_heap_delete(Relation relation, const ItemPointerData *tid);
     426              : extern void simple_heap_update(Relation relation, const ItemPointerData *otid,
     427              :                                HeapTuple tup, TU_UpdateIndexes *update_indexes);
     428              : 
     429              : extern TransactionId heap_index_delete_tuples(Relation rel,
     430              :                                               TM_IndexDeleteOp *delstate);
     431              : 
     432              : /* in heap/heapam_indexscan.c */
     433              : extern IndexFetchTableData *heapam_index_fetch_begin(Relation rel, uint32 flags);
     434              : extern void heapam_index_fetch_reset(IndexFetchTableData *scan);
     435              : extern void heapam_index_fetch_end(IndexFetchTableData *scan);
     436              : extern bool heap_hot_search_buffer(ItemPointer tid, Relation relation,
     437              :                                    Buffer buffer, Snapshot snapshot, HeapTuple heapTuple,
     438              :                                    bool *all_dead, bool first_call);
     439              : extern bool heapam_index_fetch_tuple(struct IndexFetchTableData *scan,
     440              :                                      ItemPointer tid, Snapshot snapshot,
     441              :                                      TupleTableSlot *slot, bool *heap_continue,
     442              :                                      bool *all_dead);
     443              : 
     444              : /* in heap/pruneheap.c */
     445              : extern void heap_page_prune_opt(Relation relation, Buffer buffer,
     446              :                                 Buffer *vmbuffer, bool rel_read_only);
     447              : extern void heap_page_prune_and_freeze(PruneFreezeParams *params,
     448              :                                        PruneFreezeResult *presult,
     449              :                                        OffsetNumber *off_loc,
     450              :                                        TransactionId *new_relfrozen_xid,
     451              :                                        MultiXactId *new_relmin_mxid);
     452              : extern void heap_page_prune_execute(Buffer buffer, bool lp_truncate_only,
     453              :                                     OffsetNumber *redirected, int nredirected,
     454              :                                     OffsetNumber *nowdead, int ndead,
     455              :                                     OffsetNumber *nowunused, int nunused);
     456              : extern void heap_get_root_tuples(Page page, OffsetNumber *root_offsets);
     457              : extern void log_heap_prune_and_freeze(Relation relation, Buffer buffer,
     458              :                                       Buffer vmbuffer, uint8 vmflags,
     459              :                                       TransactionId conflict_xid,
     460              :                                       bool cleanup_lock,
     461              :                                       PruneReason reason,
     462              :                                       HeapTupleFreeze *frozen, int nfrozen,
     463              :                                       OffsetNumber *redirected, int nredirected,
     464              :                                       OffsetNumber *dead, int ndead,
     465              :                                       OffsetNumber *unused, int nunused);
     466              : 
     467              : /* in heap/vacuumlazy.c */
     468              : extern void heap_vacuum_rel(Relation rel,
     469              :                             const VacuumParams *params, BufferAccessStrategy bstrategy);
     470              : #ifdef USE_ASSERT_CHECKING
     471              : extern bool heap_page_is_all_visible(Relation rel, Buffer buf,
     472              :                                      GlobalVisState *vistest,
     473              :                                      bool *all_frozen,
     474              :                                      TransactionId *newest_live_xid,
     475              :                                      OffsetNumber *logging_offnum);
     476              : #endif
     477              : 
     478              : /* in heap/heapam_visibility.c */
     479              : extern bool HeapTupleSatisfiesVisibility(HeapTuple htup, Snapshot snapshot,
     480              :                                          Buffer buffer);
     481              : extern TM_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid,
     482              :                                           Buffer buffer);
     483              : extern HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin,
     484              :                                             Buffer buffer);
     485              : extern HTSV_Result HeapTupleSatisfiesVacuumHorizon(HeapTuple htup, Buffer buffer,
     486              :                                                    TransactionId *dead_after);
     487              : extern void HeapTupleSetHintBits(HeapTupleHeader tuple, Buffer buffer,
     488              :                                  uint16 infomask, TransactionId xid);
     489              : extern bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple);
     490              : extern bool HeapTupleIsSurelyDead(HeapTuple htup,
     491              :                                   GlobalVisState *vistest);
     492              : 
     493              : /*
     494              :  * Some of the input/output to/from HeapTupleSatisfiesMVCCBatch() is passed
     495              :  * via this struct, as otherwise the increased number of arguments to
     496              :  * HeapTupleSatisfiesMVCCBatch() leads to on-stack argument passing on x86-64,
     497              :  * which causes a small regression.
     498              :  */
     499              : typedef struct BatchMVCCState
     500              : {
     501              :     HeapTupleData tuples[MaxHeapTuplesPerPage];
     502              :     bool        visible[MaxHeapTuplesPerPage];
     503              : } BatchMVCCState;
     504              : 
     505              : extern int  HeapTupleSatisfiesMVCCBatch(Snapshot snapshot, Buffer buffer,
     506              :                                         int ntups,
     507              :                                         BatchMVCCState *batchmvcc,
     508              :                                         OffsetNumber *vistuples_dense);
     509              : 
     510              : /*
     511              :  * To avoid leaking too much knowledge about reorderbuffer implementation
     512              :  * details this is implemented in reorderbuffer.c not heapam_visibility.c
     513              :  */
     514              : struct HTAB;
     515              : extern bool ResolveCminCmaxDuringDecoding(struct HTAB *tuplecid_data,
     516              :                                           Snapshot snapshot,
     517              :                                           HeapTuple htup,
     518              :                                           Buffer buffer,
     519              :                                           CommandId *cmin, CommandId *cmax);
     520              : extern void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple,
     521              :                                                 Buffer buffer, Snapshot snapshot);
     522              : 
     523              : /*
     524              :  * heap_execute_freeze_tuple
     525              :  *      Execute the prepared freezing of a tuple with caller's freeze plan.
     526              :  *
     527              :  * Caller is responsible for ensuring that no other backend can access the
     528              :  * storage underlying this tuple, either by holding an exclusive lock on the
     529              :  * buffer containing it (which is what lazy VACUUM does), or by having it be
     530              :  * in private storage (which is what CLUSTER and friends do).
     531              :  */
     532              : static inline void
     533      1663403 : heap_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz)
     534              : {
     535      1663403 :     HeapTupleHeaderSetXmax(tuple, frz->xmax);
     536              : 
     537      1663403 :     if (frz->frzflags & XLH_FREEZE_XVAC)
     538            0 :         HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
     539              : 
     540      1663403 :     if (frz->frzflags & XLH_INVALID_XVAC)
     541            0 :         HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
     542              : 
     543      1663403 :     tuple->t_infomask = frz->t_infomask;
     544      1663403 :     tuple->t_infomask2 = frz->t_infomask2;
     545      1663403 : }
     546              : 
     547              : #endif                          /* HEAPAM_H */
        

Generated by: LCOV version 2.0-1