LCOV - code coverage report
Current view: top level - src/include/access - heapam.h (source / functions) Hit Total Coverage
Test: PostgreSQL 17devel Lines: 2 2 100.0 %
Date: 2024-02-22 00:11:39 Functions: 1 1 100.0 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * heapam.h
       4             :  *    POSTGRES heap access method definitions.
       5             :  *
       6             :  *
       7             :  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
       8             :  * Portions Copyright (c) 1994, Regents of the University of California
       9             :  *
      10             :  * src/include/access/heapam.h
      11             :  *
      12             :  *-------------------------------------------------------------------------
      13             :  */
      14             : #ifndef HEAPAM_H
      15             : #define HEAPAM_H
      16             : 
      17             : #include "access/relation.h"  /* for backward compatibility */
      18             : #include "access/relscan.h"
      19             : #include "access/sdir.h"
      20             : #include "access/skey.h"
      21             : #include "access/table.h"     /* for backward compatibility */
      22             : #include "access/tableam.h"
      23             : #include "nodes/lockoptions.h"
      24             : #include "nodes/primnodes.h"
      25             : #include "storage/bufpage.h"
      26             : #include "storage/dsm.h"
      27             : #include "storage/lockdefs.h"
      28             : #include "storage/shm_toc.h"
      29             : #include "utils/relcache.h"
      30             : #include "utils/snapshot.h"
      31             : 
      32             : 
      33             : /* "options" flag bits for heap_insert */
      34             : #define HEAP_INSERT_SKIP_FSM    TABLE_INSERT_SKIP_FSM
      35             : #define HEAP_INSERT_FROZEN      TABLE_INSERT_FROZEN
      36             : #define HEAP_INSERT_NO_LOGICAL  TABLE_INSERT_NO_LOGICAL
      37             : #define HEAP_INSERT_SPECULATIVE 0x0010
      38             : 
      39             : typedef struct BulkInsertStateData *BulkInsertState;
      40             : struct TupleTableSlot;
      41             : struct VacuumCutoffs;
      42             : 
      43             : #define MaxLockTupleMode    LockTupleExclusive
      44             : 
      45             : /*
      46             :  * Descriptor for heap table scans.
      47             :  */
/*
 * Descriptor for heap table scans.
 *
 * The AM-independent TableScanDescData is embedded as the first member, so
 * this descriptor can be used where a TableScanDesc is expected.
 */
typedef struct HeapScanDescData
{
    TableScanDescData rs_base;  /* AM independent part of the descriptor */

    /* state set up at initscan time */
    BlockNumber rs_nblocks;     /* total number of blocks in rel */
    BlockNumber rs_startblock;  /* block # to start at */
    BlockNumber rs_numblocks;   /* max number of blocks to scan */
    /* rs_numblocks is usually InvalidBlockNumber, meaning "scan whole rel" */

    /* scan current state */
    bool        rs_inited;      /* false = scan not init'd yet */
    OffsetNumber rs_coffset;    /* current offset # in non-page-at-a-time mode */
    BlockNumber rs_cblock;      /* current block # in scan, if any */
    Buffer      rs_cbuf;        /* current buffer in scan, if any */
    /* NB: if rs_cbuf is not InvalidBuffer, we hold a pin on that buffer */

    BufferAccessStrategy rs_strategy;   /* access strategy for reads */

    HeapTupleData rs_ctup;      /* current tuple in scan, if any */

    /*
     * For parallel scans to store page allocation data.  NULL when not
     * performing a parallel scan.
     */
    ParallelBlockTableScanWorkerData *rs_parallelworkerdata;

    /* these fields only used in page-at-a-time mode and for bitmap scans */
    int         rs_cindex;      /* current tuple's index in vistuples */
    int         rs_ntuples;     /* number of visible tuples on page */
    OffsetNumber rs_vistuples[MaxHeapTuplesPerPage];    /* their offsets */
}           HeapScanDescData;
typedef struct HeapScanDescData *HeapScanDesc;
      81             : 
      82             : /*
      83             :  * Descriptor for fetches from heap via an index.
      84             :  */
/*
 * Descriptor for fetches from heap via an index.
 *
 * As with HeapScanDescData, the AM-independent part (IndexFetchTableData)
 * is embedded as the first member.
 */
typedef struct IndexFetchHeapData
{
    IndexFetchTableData xs_base;    /* AM independent part of the descriptor */

    Buffer      xs_cbuf;        /* current heap buffer in scan, if any */
    /* NB: if xs_cbuf is not InvalidBuffer, we hold a pin on that buffer */
} IndexFetchHeapData;
      92             : 
      93             : /* Result codes for HeapTupleSatisfiesVacuum */
/*
 * Result codes for HeapTupleSatisfiesVacuum.
 *
 * NB: htsv_get_valid_status() below assumes the valid values form the
 * contiguous range HEAPTUPLE_DEAD .. HEAPTUPLE_DELETE_IN_PROGRESS, so keep
 * any new members inside that ordering (or update the helper).
 */
typedef enum
{
    HEAPTUPLE_DEAD,             /* tuple is dead and deletable */
    HEAPTUPLE_LIVE,             /* tuple is live (committed, no deleter) */
    HEAPTUPLE_RECENTLY_DEAD,    /* tuple is dead, but not deletable yet */
    HEAPTUPLE_INSERT_IN_PROGRESS,   /* inserting xact is still in progress */
    HEAPTUPLE_DELETE_IN_PROGRESS,   /* deleting xact is still in progress */
} HTSV_Result;
     102             : 
     103             : /*
     104             :  * heap_prepare_freeze_tuple may request that heap_freeze_execute_prepared
     105             :  * check any tuple's to-be-frozen xmin and/or xmax status using pg_xact
     106             :  */
     107             : #define     HEAP_FREEZE_CHECK_XMIN_COMMITTED    0x01
     108             : #define     HEAP_FREEZE_CHECK_XMAX_ABORTED      0x02
     109             : 
     110             : /* heap_prepare_freeze_tuple state describing how to freeze a tuple */
/*
 * heap_prepare_freeze_tuple state describing how to freeze a tuple.
 *
 * Filled in by heap_prepare_freeze_tuple and later applied (one entry per
 * to-be-frozen tuple) by heap_freeze_execute_prepared; see their prototypes
 * below.
 */
typedef struct HeapTupleFreeze
{
    /* Fields describing how to process tuple */
    TransactionId xmax;
    uint16      t_infomask2;
    uint16      t_infomask;
    uint8       frzflags;

    /* xmin/xmax check flags (HEAP_FREEZE_CHECK_* bits above) */
    uint8       checkflags;
    /* Page offset number for tuple */
    OffsetNumber offset;
} HeapTupleFreeze;
     124             : 
     125             : /*
     126             :  * State used by VACUUM to track the details of freezing all eligible tuples
     127             :  * on a given heap page.
     128             :  *
     129             :  * VACUUM prepares freeze plans for each page via heap_prepare_freeze_tuple
     130             :  * calls (every tuple with storage gets its own call).  This page-level freeze
     131             :  * state is updated across each call, which ultimately determines whether or
     132             :  * not freezing the page is required.
     133             :  *
     134             :  * Aside from the basic question of whether or not freezing will go ahead, the
     135             :  * state also tracks the oldest extant XID/MXID in the table as a whole, for
     136             :  * the purposes of advancing relfrozenxid/relminmxid values in pg_class later
     137             :  * on.  Each heap_prepare_freeze_tuple call pushes NewRelfrozenXid and/or
     138             :  * NewRelminMxid back as required to avoid unsafe final pg_class values.  Any
     139             :  * and all unfrozen XIDs or MXIDs that remain after VACUUM finishes _must_
     140             :  * have values >= the final relfrozenxid/relminmxid values in pg_class.  This
     141             :  * includes XIDs that remain as MultiXact members from any tuple's xmax.
     142             :  *
     143             :  * When 'freeze_required' flag isn't set after all tuples are examined, the
     144             :  * final choice on freezing is made by vacuumlazy.c.  It can decide to trigger
     145             :  * freezing based on whatever criteria it deems appropriate.  However, it is
     146             :  * recommended that vacuumlazy.c avoid early freezing when freezing does not
     147             :  * enable setting the target page all-frozen in the visibility map afterwards.
     148             :  */
typedef struct HeapPageFreeze
{
    /* Is heap_prepare_freeze_tuple caller required to freeze page? */
    bool        freeze_required;

    /*
     * "Freeze" NewRelfrozenXid/NewRelminMxid trackers.
     *
     * Trackers used when heap_freeze_execute_prepared freezes, or when there
     * are zero freeze plans for a page.  It is always valid for vacuumlazy.c
     * to freeze any page, by definition.  This even includes pages that have
     * no tuples with storage to consider in the first place.  That way the
     * 'totally_frozen' results from heap_prepare_freeze_tuple can always be
     * used in the same way, even when no freeze plans need to be executed to
     * "freeze the page".  Only the "freeze" path needs to consider the need
     * to set pages all-frozen in the visibility map under this scheme.
     *
     * When we freeze a page, we generally freeze all XIDs < OldestXmin, only
     * leaving behind XIDs that are ineligible for freezing, if any.  And so
     * you might wonder why these trackers are necessary at all; why should
     * _any_ page that VACUUM freezes _ever_ be left with XIDs/MXIDs that
     * ratchet back the top-level NewRelfrozenXid/NewRelminMxid trackers?
     *
     * It is useful to use a definition of "freeze the page" that does not
     * overspecify how MultiXacts are affected.  heap_prepare_freeze_tuple
     * generally prefers to remove Multis eagerly, but lazy processing is used
     * in cases where laziness allows VACUUM to avoid allocating a new Multi.
     * The "freeze the page" trackers enable this flexibility.
     */
    TransactionId FreezePageRelfrozenXid;   /* "freeze" XID tracker */
    MultiXactId FreezePageRelminMxid;   /* "freeze" MXID tracker */

    /*
     * "No freeze" NewRelfrozenXid/NewRelminMxid trackers.
     *
     * These trackers are maintained in the same way as the trackers used when
     * VACUUM scans a page that isn't cleanup locked.  Both code paths are
     * based on the same general idea (do less work for this page during the
     * ongoing VACUUM, at the cost of having to accept older final values).
     */
    TransactionId NoFreezePageRelfrozenXid; /* "no freeze" XID tracker */
    MultiXactId NoFreezePageRelminMxid; /* "no freeze" MXID tracker */

} HeapPageFreeze;
     193             : 
     194             : /*
     195             :  * Per-page state returned from pruning
     196             :  */
/*
 * Per-page state returned from pruning (see heap_page_prune below).
 */
typedef struct PruneResult
{
    int         ndeleted;       /* Number of tuples deleted from the page */
    int         nnewlpdead;     /* Number of newly LP_DEAD items */

    /*
     * Tuple visibility is only computed once for each tuple, for correctness
     * and efficiency reasons; see comment in heap_page_prune() for details.
     * This is of type int8[], instead of HTSV_Result[], so we can use -1 to
     * indicate no visibility has been computed, e.g. for LP_DEAD items.
     * Read entries via htsv_get_valid_status(), which asserts that the
     * status was actually computed.
     *
     * This needs to be MaxHeapTuplesPerPage + 1 long as FirstOffsetNumber is
     * 1. Otherwise every access would need to subtract 1.
     */
    int8        htsv[MaxHeapTuplesPerPage + 1];
} PruneResult;
     213             : 
     214             : /*
     215             :  * Pruning calculates tuple visibility once and saves the results in an array
     216             :  * of int8. See PruneResult.htsv for details. This helper function is meant to
     217             :  * guard against examining visibility status array members which have not yet
     218             :  * been computed.
     219             :  */
     220             : static inline HTSV_Result
     221    15474390 : htsv_get_valid_status(int status)
     222             : {
     223             :     Assert(status >= HEAPTUPLE_DEAD &&
     224             :            status <= HEAPTUPLE_DELETE_IN_PROGRESS);
     225    15474390 :     return (HTSV_Result) status;
     226             : }
     227             : 
     228             : /* ----------------
     229             :  *      function prototypes for heap access method
     230             :  *
     231             :  * heap_create, heap_create_with_catalog, and heap_drop_with_catalog
     232             :  * are declared in catalog/heap.h
     233             :  * ----------------
     234             :  */
     235             : 
     236             : 
     237             : /*
     238             :  * HeapScanIsValid
     239             :  *      True iff the heap scan is valid.
     240             :  */
     241             : #define HeapScanIsValid(scan) PointerIsValid(scan)
     242             : 
     243             : extern TableScanDesc heap_beginscan(Relation relation, Snapshot snapshot,
     244             :                                     int nkeys, ScanKey key,
     245             :                                     ParallelTableScanDesc parallel_scan,
     246             :                                     uint32 flags);
     247             : extern void heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk,
     248             :                                BlockNumber numBlks);
     249             : extern void heapgetpage(TableScanDesc sscan, BlockNumber block);
     250             : extern void heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
     251             :                         bool allow_strat, bool allow_sync, bool allow_pagemode);
     252             : extern void heap_endscan(TableScanDesc sscan);
     253             : extern HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction);
     254             : extern bool heap_getnextslot(TableScanDesc sscan,
     255             :                              ScanDirection direction, struct TupleTableSlot *slot);
     256             : extern void heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
     257             :                               ItemPointer maxtid);
     258             : extern bool heap_getnextslot_tidrange(TableScanDesc sscan,
     259             :                                       ScanDirection direction,
     260             :                                       TupleTableSlot *slot);
     261             : extern bool heap_fetch(Relation relation, Snapshot snapshot,
     262             :                        HeapTuple tuple, Buffer *userbuf, bool keep_buf);
     263             : extern bool heap_hot_search_buffer(ItemPointer tid, Relation relation,
     264             :                                    Buffer buffer, Snapshot snapshot, HeapTuple heapTuple,
     265             :                                    bool *all_dead, bool first_call);
     266             : 
     267             : extern void heap_get_latest_tid(TableScanDesc sscan, ItemPointer tid);
     268             : 
     269             : extern BulkInsertState GetBulkInsertState(void);
     270             : extern void FreeBulkInsertState(BulkInsertState);
     271             : extern void ReleaseBulkInsertStatePin(BulkInsertState bistate);
     272             : 
     273             : extern void heap_insert(Relation relation, HeapTuple tup, CommandId cid,
     274             :                         int options, BulkInsertState bistate);
     275             : extern void heap_multi_insert(Relation relation, struct TupleTableSlot **slots,
     276             :                               int ntuples, CommandId cid, int options,
     277             :                               BulkInsertState bistate);
     278             : extern TM_Result heap_delete(Relation relation, ItemPointer tid,
     279             :                              CommandId cid, Snapshot crosscheck, bool wait,
     280             :                              struct TM_FailureData *tmfd, bool changingPart);
     281             : extern void heap_finish_speculative(Relation relation, ItemPointer tid);
     282             : extern void heap_abort_speculative(Relation relation, ItemPointer tid);
     283             : extern TM_Result heap_update(Relation relation, ItemPointer otid,
     284             :                              HeapTuple newtup,
     285             :                              CommandId cid, Snapshot crosscheck, bool wait,
     286             :                              struct TM_FailureData *tmfd, LockTupleMode *lockmode,
     287             :                              TU_UpdateIndexes *update_indexes);
     288             : extern TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
     289             :                                  CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
     290             :                                  bool follow_updates,
     291             :                                  Buffer *buffer, struct TM_FailureData *tmfd);
     292             : 
     293             : extern void heap_inplace_update(Relation relation, HeapTuple tuple);
     294             : extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
     295             :                                       const struct VacuumCutoffs *cutoffs,
     296             :                                       HeapPageFreeze *pagefrz,
     297             :                                       HeapTupleFreeze *frz, bool *totally_frozen);
     298             : extern void heap_freeze_execute_prepared(Relation rel, Buffer buffer,
     299             :                                          TransactionId snapshotConflictHorizon,
     300             :                                          HeapTupleFreeze *tuples, int ntuples);
     301             : extern bool heap_freeze_tuple(HeapTupleHeader tuple,
     302             :                               TransactionId relfrozenxid, TransactionId relminmxid,
     303             :                               TransactionId FreezeLimit, TransactionId MultiXactCutoff);
     304             : extern bool heap_tuple_should_freeze(HeapTupleHeader tuple,
     305             :                                      const struct VacuumCutoffs *cutoffs,
     306             :                                      TransactionId *NoFreezePageRelfrozenXid,
     307             :                                      MultiXactId *NoFreezePageRelminMxid);
     308             : extern bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple);
     309             : 
     310             : extern void simple_heap_insert(Relation relation, HeapTuple tup);
     311             : extern void simple_heap_delete(Relation relation, ItemPointer tid);
     312             : extern void simple_heap_update(Relation relation, ItemPointer otid,
     313             :                                HeapTuple tup, TU_UpdateIndexes *update_indexes);
     314             : 
     315             : extern TransactionId heap_index_delete_tuples(Relation rel,
     316             :                                               TM_IndexDeleteOp *delstate);
     317             : 
     318             : /* in heap/pruneheap.c */
     319             : struct GlobalVisState;
     320             : extern void heap_page_prune_opt(Relation relation, Buffer buffer);
     321             : extern void heap_page_prune(Relation relation, Buffer buffer,
     322             :                             struct GlobalVisState *vistest,
     323             :                             bool mark_unused_now,
     324             :                             PruneResult *presult,
     325             :                             OffsetNumber *off_loc);
     326             : extern void heap_page_prune_execute(Buffer buffer,
     327             :                                     OffsetNumber *redirected, int nredirected,
     328             :                                     OffsetNumber *nowdead, int ndead,
     329             :                                     OffsetNumber *nowunused, int nunused);
     330             : extern void heap_get_root_tuples(Page page, OffsetNumber *root_offsets);
     331             : 
     332             : /* in heap/vacuumlazy.c */
     333             : struct VacuumParams;
     334             : extern void heap_vacuum_rel(Relation rel,
     335             :                             struct VacuumParams *params, BufferAccessStrategy bstrategy);
     336             : 
     337             : /* in heap/heapam_visibility.c */
     338             : extern bool HeapTupleSatisfiesVisibility(HeapTuple htup, Snapshot snapshot,
     339             :                                          Buffer buffer);
     340             : extern TM_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid,
     341             :                                           Buffer buffer);
     342             : extern HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin,
     343             :                                             Buffer buffer);
     344             : extern HTSV_Result HeapTupleSatisfiesVacuumHorizon(HeapTuple htup, Buffer buffer,
     345             :                                                    TransactionId *dead_after);
     346             : extern void HeapTupleSetHintBits(HeapTupleHeader tuple, Buffer buffer,
     347             :                                  uint16 infomask, TransactionId xid);
     348             : extern bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple);
     349             : extern bool HeapTupleIsSurelyDead(HeapTuple htup,
     350             :                                   struct GlobalVisState *vistest);
     351             : 
     352             : /*
     353             :  * To avoid leaking too much knowledge about reorderbuffer implementation
     354             :  * details this is implemented in reorderbuffer.c not heapam_visibility.c
     355             :  */
     356             : struct HTAB;
     357             : extern bool ResolveCminCmaxDuringDecoding(struct HTAB *tuplecid_data,
     358             :                                           Snapshot snapshot,
     359             :                                           HeapTuple htup,
     360             :                                           Buffer buffer,
     361             :                                           CommandId *cmin, CommandId *cmax);
     362             : extern void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple,
     363             :                                                 Buffer buffer, Snapshot snapshot);
     364             : 
     365             : #endif                          /* HEAPAM_H */

Generated by: LCOV version 1.14