LCOV - code coverage report
Current view: top level - contrib/amcheck - verify_nbtree.c (source / functions)
Test: PostgreSQL 19devel
Date: 2026-01-30 05:16:55
                         Hit    Total    Coverage
              Lines:     546    746      73.2 %
              Functions:  31     33      93.9 %
Legend: Lines: hit / not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * verify_nbtree.c
       4             :  *      Verifies the integrity of nbtree indexes based on invariants.
       5             :  *
       6             :  * For B-Tree indexes, verification includes checking that each page in the
       7             :  * target index has items in logical order as reported by an insertion scankey
       8             :  * (the insertion scankey sort-wise NULL semantics are needed for
       9             :  * verification).
      10             :  *
      11             :  * When index-to-heap verification is requested, a Bloom filter is used to
      12             :  * fingerprint all tuples in the target index, as the index is traversed to
      13             :  * verify its structure.  A heap scan later uses Bloom filter probes to verify
      14             :  * that every visible heap tuple has a matching index tuple.
      15             :  *
      16             :  *
      17             :  * Copyright (c) 2017-2026, PostgreSQL Global Development Group
      18             :  *
      19             :  * IDENTIFICATION
      20             :  *    contrib/amcheck/verify_nbtree.c
      21             :  *
      22             :  *-------------------------------------------------------------------------
      23             :  */
      24             : #include "postgres.h"
      25             : 
      26             : #include "access/heaptoast.h"
      27             : #include "access/htup_details.h"
      28             : #include "access/nbtree.h"
      29             : #include "access/table.h"
      30             : #include "access/tableam.h"
      31             : #include "access/transam.h"
      32             : #include "access/xact.h"
      33             : #include "verify_common.h"
      34             : #include "catalog/index.h"
      35             : #include "catalog/pg_am.h"
      36             : #include "catalog/pg_opfamily_d.h"
      37             : #include "common/pg_prng.h"
      38             : #include "lib/bloomfilter.h"
      39             : #include "miscadmin.h"
      40             : #include "storage/smgr.h"
      41             : #include "utils/guc.h"
      42             : #include "utils/memutils.h"
      43             : #include "utils/snapmgr.h"
      44             : 
      45             : 
      46         656 : PG_MODULE_MAGIC_EXT(
      47             :                     .name = "amcheck",
      48             :                     .version = PG_VERSION
      49             : );
      50             : 
      51             : /*
      52             :  * A B-Tree cannot possibly have this many levels, since there must be one
      53             :  * block per level, which is bound by the range of BlockNumber:
      54             :  */
      55             : #define InvalidBtreeLevel   ((uint32) InvalidBlockNumber)
      56             : #define BTreeTupleGetNKeyAtts(itup, rel)   \
      57             :     Min(IndexRelationGetNumberOfKeyAttributes(rel), BTreeTupleGetNAtts(itup, rel))
      58             : 
      59             : /*
      60             :  * State associated with verifying a B-Tree index
      61             :  *
      62             :  * target is the point of reference for a verification operation.
      63             :  *
      64             :  * Other B-Tree pages may be allocated, but those are always auxiliary (e.g.,
      65             :  * they are current target's child pages).  Conceptually, problems are only
      66             :  * ever found in the current target page (or for a particular heap tuple during
      67             :  * heapallindexed verification).  Each page found by verification's left/right,
      68             :  * top/bottom scan becomes the target exactly once.
      69             :  */
      70             : typedef struct BtreeCheckState
      71             : {
      72             :     /*
      73             :      * Unchanging state, established at start of verification:
      74             :      */
      75             : 
      76             :     /* B-Tree Index Relation and associated heap relation */
      77             :     Relation    rel;
      78             :     Relation    heaprel;
      79             :     /* rel is heapkeyspace index? */
      80             :     bool        heapkeyspace;
      81             :     /* ShareLock held on heap/index, rather than AccessShareLock? */
      82             :     bool        readonly;
      83             :     /* Also verifying heap has no unindexed tuples? */
      84             :     bool        heapallindexed;
      85             :     /* Also making sure non-pivot tuples can be found by new search? */
      86             :     bool        rootdescend;
      87             :     /* Also check uniqueness constraint if index is unique */
      88             :     bool        checkunique;
      89             :     /* Per-page context */
      90             :     MemoryContext targetcontext;
      91             :     /* Buffer access strategy */
      92             :     BufferAccessStrategy checkstrategy;
      93             : 
      94             :     /*
      95             :      * Info for uniqueness checking. Fill this field and the one below once
      96             :      * per index check.
      97             :      */
      98             :     IndexInfo  *indexinfo;
      99             :     /* Table scan snapshot for heapallindexed and checkunique */
     100             :     Snapshot    snapshot;
     101             : 
     102             :     /*
     103             :      * Mutable state, for verification of particular page:
     104             :      */
     105             : 
     106             :     /* Current target page */
     107             :     Page        target;
     108             :     /* Target block number */
     109             :     BlockNumber targetblock;
     110             :     /* Target page's LSN */
     111             :     XLogRecPtr  targetlsn;
     112             : 
     113             :     /*
     114             :      * Low key: high key of left sibling of target page.  Used only for child
     115             :      * verification.  So, 'lowkey' is kept only when 'readonly' is set.
     116             :      */
     117             :     IndexTuple  lowkey;
     118             : 
     119             :     /*
      120             :      * The rightlink and incomplete-split flag of the block one level down
      121             :      * from the target page that was most recently visited via a downlink
      122             :      * from the target page.  We use them to check for missing downlinks.
     123             :      */
     124             :     BlockNumber prevrightlink;
     125             :     bool        previncompletesplit;
     126             : 
     127             :     /*
     128             :      * Mutable state, for optional heapallindexed verification:
     129             :      */
     130             : 
     131             :     /* Bloom filter fingerprints B-Tree index */
     132             :     bloom_filter *filter;
     133             :     /* Debug counter */
     134             :     int64       heaptuplespresent;
     135             : } BtreeCheckState;
     136             : 
     137             : /*
     138             :  * Starting point for verifying an entire B-Tree index level
     139             :  */
     140             : typedef struct BtreeLevel
     141             : {
     142             :     /* Level number (0 is leaf page level). */
     143             :     uint32      level;
     144             : 
      145             :     /* Leftmost block on level.  Scan of level begins here. */
     146             :     BlockNumber leftmost;
     147             : 
     148             :     /* Is this level reported as "true" root level by meta page? */
     149             :     bool        istruerootlevel;
     150             : } BtreeLevel;
     151             : 
     152             : /*
     153             :  * Information about the last visible entry with current B-tree key.  Used
     154             :  * for validation of the unique constraint.
     155             :  */
     156             : typedef struct BtreeLastVisibleEntry
     157             : {
     158             :     BlockNumber blkno;          /* Index block */
     159             :     OffsetNumber offset;        /* Offset on index block */
     160             :     int         postingIndex;   /* Number in the posting list (-1 for
     161             :                                  * non-deduplicated tuples) */
     162             :     ItemPointer tid;            /* Heap tid */
     163             : } BtreeLastVisibleEntry;
     164             : 
     165             : /*
     166             :  * arguments for the bt_index_check_callback callback
     167             :  */
     168             : typedef struct BTCallbackState
     169             : {
     170             :     bool        parentcheck;
     171             :     bool        heapallindexed;
     172             :     bool        rootdescend;
     173             :     bool        checkunique;
     174             : } BTCallbackState;
     175             : 
     176         184 : PG_FUNCTION_INFO_V1(bt_index_check);
     177         130 : PG_FUNCTION_INFO_V1(bt_index_parent_check);
     178             : 
     179             : static void bt_index_check_callback(Relation indrel, Relation heaprel,
     180             :                                     void *state, bool readonly);
     181             : static void bt_check_every_level(Relation rel, Relation heaprel,
     182             :                                  bool heapkeyspace, bool readonly, bool heapallindexed,
     183             :                                  bool rootdescend, bool checkunique);
     184             : static BtreeLevel bt_check_level_from_leftmost(BtreeCheckState *state,
     185             :                                                BtreeLevel level);
     186             : static bool bt_leftmost_ignoring_half_dead(BtreeCheckState *state,
     187             :                                            BlockNumber start,
     188             :                                            BTPageOpaque start_opaque);
     189             : static void bt_recheck_sibling_links(BtreeCheckState *state,
     190             :                                      BlockNumber btpo_prev_from_target,
     191             :                                      BlockNumber leftcurrent);
     192             : static bool heap_entry_is_visible(BtreeCheckState *state, ItemPointer tid);
     193             : static void bt_report_duplicate(BtreeCheckState *state,
     194             :                                 BtreeLastVisibleEntry *lVis,
     195             :                                 ItemPointer nexttid,
     196             :                                 BlockNumber nblock, OffsetNumber noffset,
     197             :                                 int nposting);
     198             : static void bt_entry_unique_check(BtreeCheckState *state, IndexTuple itup,
     199             :                                   BlockNumber targetblock, OffsetNumber offset,
     200             :                                   BtreeLastVisibleEntry *lVis);
     201             : static void bt_target_page_check(BtreeCheckState *state);
     202             : static BTScanInsert bt_right_page_check_scankey(BtreeCheckState *state,
     203             :                                                 OffsetNumber *rightfirstoffset);
     204             : static void bt_child_check(BtreeCheckState *state, BTScanInsert targetkey,
     205             :                            OffsetNumber downlinkoffnum);
     206             : static void bt_child_highkey_check(BtreeCheckState *state,
     207             :                                    OffsetNumber target_downlinkoffnum,
     208             :                                    Page loaded_child,
     209             :                                    uint32 target_level);
     210             : static void bt_downlink_missing_check(BtreeCheckState *state, bool rightsplit,
     211             :                                       BlockNumber blkno, Page page);
     212             : static void bt_tuple_present_callback(Relation index, ItemPointer tid,
     213             :                                       Datum *values, bool *isnull,
     214             :                                       bool tupleIsAlive, void *checkstate);
     215             : static IndexTuple bt_normalize_tuple(BtreeCheckState *state,
     216             :                                      IndexTuple itup);
     217             : static inline IndexTuple bt_posting_plain_tuple(IndexTuple itup, int n);
     218             : static bool bt_rootdescend(BtreeCheckState *state, IndexTuple itup);
     219             : static inline bool offset_is_negative_infinity(BTPageOpaque opaque,
     220             :                                                OffsetNumber offset);
     221             : static inline bool invariant_l_offset(BtreeCheckState *state, BTScanInsert key,
     222             :                                       OffsetNumber upperbound);
     223             : static inline bool invariant_leq_offset(BtreeCheckState *state,
     224             :                                         BTScanInsert key,
     225             :                                         OffsetNumber upperbound);
     226             : static inline bool invariant_g_offset(BtreeCheckState *state, BTScanInsert key,
     227             :                                       OffsetNumber lowerbound);
     228             : static inline bool invariant_l_nontarget_offset(BtreeCheckState *state,
     229             :                                                 BTScanInsert key,
     230             :                                                 BlockNumber nontargetblock,
     231             :                                                 Page nontarget,
     232             :                                                 OffsetNumber upperbound);
     233             : static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum);
     234             : static inline BTScanInsert bt_mkscankey_pivotsearch(Relation rel,
     235             :                                                     IndexTuple itup);
     236             : static ItemId PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block,
     237             :                                    Page page, OffsetNumber offset);
     238             : static inline ItemPointer BTreeTupleGetHeapTIDCareful(BtreeCheckState *state,
     239             :                                                       IndexTuple itup, bool nonpivot);
     240             : static inline ItemPointer BTreeTupleGetPointsToTID(IndexTuple itup);
     241             : 
     242             : /*
     243             :  * bt_index_check(index regclass, heapallindexed boolean, checkunique boolean)
     244             :  *
     245             :  * Verify integrity of B-Tree index.
     246             :  *
     247             :  * Acquires AccessShareLock on heap & index relations.  Does not consider
     248             :  * invariants that exist between parent/child pages.  Optionally verifies
     249             :  * that heap does not contain any unindexed or incorrectly indexed tuples.
     250             :  */
     251             : Datum
     252        7868 : bt_index_check(PG_FUNCTION_ARGS)
     253             : {
     254        7868 :     Oid         indrelid = PG_GETARG_OID(0);
     255             :     BTCallbackState args;
     256             : 
     257        7868 :     args.heapallindexed = false;
     258        7868 :     args.rootdescend = false;
     259        7868 :     args.parentcheck = false;
     260        7868 :     args.checkunique = false;
     261             : 
     262        7868 :     if (PG_NARGS() >= 2)
     263        7856 :         args.heapallindexed = PG_GETARG_BOOL(1);
     264        7868 :     if (PG_NARGS() >= 3)
     265        1364 :         args.checkunique = PG_GETARG_BOOL(2);
     266             : 
     267        7868 :     amcheck_lock_relation_and_check(indrelid, BTREE_AM_OID,
     268             :                                     bt_index_check_callback,
     269             :                                     AccessShareLock, &args);
     270             : 
     271        7816 :     PG_RETURN_VOID();
     272             : }
     273             : 
     274             : /*
     275             :  * bt_index_parent_check(index regclass, heapallindexed boolean, rootdescend boolean, checkunique boolean)
     276             :  *
     277             :  * Verify integrity of B-Tree index.
     278             :  *
     279             :  * Acquires ShareLock on heap & index relations.  Verifies that downlinks in
     280             :  * parent pages are valid lower bounds on child pages.  Optionally verifies
     281             :  * that heap does not contain any unindexed or incorrectly indexed tuples.
     282             :  */
     283             : Datum
     284         124 : bt_index_parent_check(PG_FUNCTION_ARGS)
     285             : {
     286         124 :     Oid         indrelid = PG_GETARG_OID(0);
     287             :     BTCallbackState args;
     288             : 
     289         124 :     args.heapallindexed = false;
     290         124 :     args.rootdescend = false;
     291         124 :     args.parentcheck = true;
     292         124 :     args.checkunique = false;
     293             : 
     294         124 :     if (PG_NARGS() >= 2)
     295         112 :         args.heapallindexed = PG_GETARG_BOOL(1);
     296         124 :     if (PG_NARGS() >= 3)
     297         104 :         args.rootdescend = PG_GETARG_BOOL(2);
     298         124 :     if (PG_NARGS() >= 4)
     299          52 :         args.checkunique = PG_GETARG_BOOL(3);
     300             : 
     301         124 :     amcheck_lock_relation_and_check(indrelid, BTREE_AM_OID,
     302             :                                     bt_index_check_callback,
     303             :                                     ShareLock, &args);
     304             : 
     305          88 :     PG_RETURN_VOID();
     306             : }
     307             : 
     308             : /*
     309             :  * Helper for bt_index_[parent_]check, coordinating the bulk of the work.
     310             :  */
     311             : static void
     312        7980 : bt_index_check_callback(Relation indrel, Relation heaprel, void *state, bool readonly)
     313             : {
     314        7980 :     BTCallbackState *args = (BTCallbackState *) state;
     315             :     bool        heapkeyspace,
     316             :                 allequalimage;
     317             : 
     318        7980 :     if (!smgrexists(RelationGetSmgr(indrel), MAIN_FORKNUM))
     319          36 :         ereport(ERROR,
     320             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
     321             :                  errmsg("index \"%s\" lacks a main relation fork",
     322             :                         RelationGetRelationName(indrel))));
     323             : 
     324             :     /* Extract metadata from metapage, and sanitize it in passing */
     325        7944 :     _bt_metaversion(indrel, &heapkeyspace, &allequalimage);
     326        7944 :     if (allequalimage && !heapkeyspace)
     327           0 :         ereport(ERROR,
     328             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
     329             :                  errmsg("index \"%s\" metapage has equalimage field set on unsupported nbtree version",
     330             :                         RelationGetRelationName(indrel))));
     331        7944 :     if (allequalimage && !_bt_allequalimage(indrel, false))
     332             :     {
     333           0 :         bool        has_interval_ops = false;
     334             : 
     335           0 :         for (int i = 0; i < IndexRelationGetNumberOfKeyAttributes(indrel); i++)
     336           0 :             if (indrel->rd_opfamily[i] == INTERVAL_BTREE_FAM_OID)
     337             :             {
     338           0 :                 has_interval_ops = true;
     339           0 :                 ereport(ERROR,
     340             :                         (errcode(ERRCODE_INDEX_CORRUPTED),
     341             :                          errmsg("index \"%s\" metapage incorrectly indicates that deduplication is safe",
     342             :                                 RelationGetRelationName(indrel)),
     343             :                          has_interval_ops
     344             :                          ? errhint("This is known of \"interval\" indexes last built on a version predating 2023-11.")
     345             :                          : 0));
     346             :             }
     347             :     }
     348             : 
     349             :     /* Check index, possibly against table it is an index on */
     350        7944 :     bt_check_every_level(indrel, heaprel, heapkeyspace, readonly,
     351        7944 :                          args->heapallindexed, args->rootdescend, args->checkunique);
     352        7904 : }
     353             : 
     354             : /*
     355             :  * Main entry point for B-Tree SQL-callable functions. Walks the B-Tree in
     356             :  * logical order, verifying invariants as it goes.  Optionally, verification
     357             :  * checks if the heap relation contains any tuples that are not represented in
     358             :  * the index but should be.
     359             :  *
     360             :  * It is the caller's responsibility to acquire appropriate heavyweight lock on
     361             :  * the index relation, and advise us if extra checks are safe when a ShareLock
     362             :  * is held.  (A lock of the same type must also have been acquired on the heap
     363             :  * relation.)
     364             :  *
     365             :  * A ShareLock is generally assumed to prevent any kind of physical
     366             :  * modification to the index structure, including modifications that VACUUM may
     367             :  * make.  This does not include setting of the LP_DEAD bit by concurrent index
     368             :  * scans, although that is just metadata that is not able to directly affect
     369             :  * any check performed here.  Any concurrent process that might act on the
     370             :  * LP_DEAD bit being set (recycle space) requires a heavyweight lock that
     371             :  * cannot be held while we hold a ShareLock.  (Besides, even if that could
     372             :  * happen, the ad-hoc recycling when a page might otherwise split is performed
     373             :  * per-page, and requires an exclusive buffer lock, which wouldn't cause us
     374             :  * trouble.  _bt_delitems_vacuum() may only delete leaf items, and so the extra
     375             :  * parent/child check cannot be affected.)
     376             :  */
     377             : static void
     378        7944 : bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace,
     379             :                      bool readonly, bool heapallindexed, bool rootdescend,
     380             :                      bool checkunique)
     381             : {
     382             :     BtreeCheckState *state;
     383             :     Page        metapage;
     384             :     BTMetaPageData *metad;
     385             :     uint32      previouslevel;
     386             :     BtreeLevel  current;
     387             : 
     388        7944 :     if (!readonly)
     389        7844 :         elog(DEBUG1, "verifying consistency of tree structure for index \"%s\"",
     390             :              RelationGetRelationName(rel));
     391             :     else
     392         100 :         elog(DEBUG1, "verifying consistency of tree structure for index \"%s\" with cross-level checks",
     393             :              RelationGetRelationName(rel));
     394             : 
     395             :     /*
     396             :      * This assertion matches the one in index_getnext_tid().  See page
     397             :      * recycling/"visible to everyone" notes in nbtree README.
     398             :      */
     399             :     Assert(TransactionIdIsValid(RecentXmin));
     400             : 
     401             :     /*
     402             :      * Initialize state for entire verification operation
     403             :      */
     404        7944 :     state = palloc0_object(BtreeCheckState);
     405        7944 :     state->rel = rel;
     406        7944 :     state->heaprel = heaprel;
     407        7944 :     state->heapkeyspace = heapkeyspace;
     408        7944 :     state->readonly = readonly;
     409        7944 :     state->heapallindexed = heapallindexed;
     410        7944 :     state->rootdescend = rootdescend;
     411        7944 :     state->checkunique = checkunique;
     412        7944 :     state->snapshot = InvalidSnapshot;
     413             : 
     414        7944 :     if (state->heapallindexed)
     415             :     {
     416             :         int64       total_pages;
     417             :         int64       total_elems;
     418             :         uint64      seed;
     419             : 
     420             :         /*
     421             :          * Size Bloom filter based on estimated number of tuples in index,
     422             :          * while conservatively assuming that each block must contain at least
     423             :          * MaxTIDsPerBTreePage / 3 "plain" tuples -- see
     424             :          * bt_posting_plain_tuple() for definition, and details of how posting
     425             :          * list tuples are handled.
     426             :          */
     427         142 :         total_pages = RelationGetNumberOfBlocks(rel);
     428         142 :         total_elems = Max(total_pages * (MaxTIDsPerBTreePage / 3),
     429             :                           (int64) state->rel->rd_rel->reltuples);
     430             :         /* Generate a random seed to avoid repetition */
     431         142 :         seed = pg_prng_uint64(&pg_global_prng_state);
     432             :         /* Create Bloom filter to fingerprint index */
     433         142 :         state->filter = bloom_create(total_elems, maintenance_work_mem, seed);
     434         142 :         state->heaptuplespresent = 0;
     435             : 
     436             :         /*
     437             :          * Register our own snapshot for heapallindexed, rather than asking
     438             :          * table_index_build_scan() to do this for us later.  This needs to
     439             :          * happen before index fingerprinting begins, so we can later be
     440             :          * certain that index fingerprinting should have reached all tuples
     441             :          * returned by table_index_build_scan().
     442             :          */
     443         142 :         state->snapshot = RegisterSnapshot(GetTransactionSnapshot());
     444             : 
     445             :         /*
     446             :          * GetTransactionSnapshot() always acquires a new MVCC snapshot in
     447             :          * READ COMMITTED mode.  A new snapshot is guaranteed to have all the
     448             :          * entries it requires in the index.
     449             :          *
     450             :          * We must defend against the possibility that an old xact snapshot
     451             :          * was returned at higher isolation levels when that snapshot is not
     452             :          * safe for index scans of the target index.  This is possible when
     453             :          * the snapshot sees tuples that are before the index's indcheckxmin
     454             :          * horizon.  Throwing an error here should be very rare.  It doesn't
     455             :          * seem worth using a secondary snapshot to avoid this.
     456             :          */
     457         142 :         if (IsolationUsesXactSnapshot() && rel->rd_index->indcheckxmin &&
     458           0 :             !TransactionIdPrecedes(HeapTupleHeaderGetXmin(rel->rd_indextuple->t_data),
     459           0 :                                    state->snapshot->xmin))
     460           0 :             ereport(ERROR,
     461             :                     errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
     462             :                     errmsg("index \"%s\" cannot be verified using transaction snapshot",
     463             :                            RelationGetRelationName(rel)));
     464             :     }
     465             : 
     466             :     /*
     467             :      * We need a snapshot to check the uniqueness of the index.  For better
     468             :      * performance, take it once per index check.  If one was already taken
     469             :      * above, use that.
     470             :      */
     471        7944 :     if (state->checkunique)
     472             :     {
     473        1408 :         state->indexinfo = BuildIndexInfo(state->rel);
     474             : 
     475        1408 :         if (state->indexinfo->ii_Unique && state->snapshot == InvalidSnapshot)
     476        1242 :             state->snapshot = RegisterSnapshot(GetTransactionSnapshot());
     477             :     }
     478             : 
     479             :     Assert(!state->rootdescend || state->readonly);
     480        7944 :     if (state->rootdescend && !state->heapkeyspace)
     481           0 :         ereport(ERROR,
     482             :                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
     483             :                  errmsg("cannot verify that tuples from index \"%s\" can each be found by an independent index search",
     484             :                         RelationGetRelationName(rel)),
     485             :                  errhint("Only B-Tree version 4 indexes support rootdescend verification.")));
     486             : 
     487             :     /* Create context for page */
     488        7944 :     state->targetcontext = AllocSetContextCreate(CurrentMemoryContext,
     489             :                                                  "amcheck context",
     490             :                                                  ALLOCSET_DEFAULT_SIZES);
     491        7944 :     state->checkstrategy = GetAccessStrategy(BAS_BULKREAD);
     492             : 
     493             :     /* Get true root block from meta-page */
     494        7944 :     metapage = palloc_btree_page(state, BTREE_METAPAGE);
     495        7944 :     metad = BTPageGetMeta(metapage);
     496             : 
     497             :     /*
     498             :      * Certain deletion patterns can result in "skinny" B-Tree indexes, where
     499             :      * the fast root and true root differ.
     500             :      *
     501             :      * Start from the true root, not the fast root, unlike conventional index
     502             :      * scans.  This approach is more thorough, and removes the risk of
     503             :      * following a stale fast root from the meta page.
     504             :      */
     505        7944 :     if (metad->btm_fastroot != metad->btm_root)
     506          26 :         ereport(DEBUG1,
     507             :                 (errcode(ERRCODE_NO_DATA),
     508             :                  errmsg_internal("harmless fast root mismatch in index \"%s\"",
     509             :                                  RelationGetRelationName(rel)),
     510             :                  errdetail_internal("Fast root block %u (level %u) differs from true root block %u (level %u).",
     511             :                                     metad->btm_fastroot, metad->btm_fastlevel,
     512             :                                     metad->btm_root, metad->btm_level)));
     513             : 
     514             :     /*
     515             :      * Starting at the root, verify every level.  Move left to right, top to
     516             :      * bottom.  Note that there may be no pages other than the meta page (meta
     517             :      * page can indicate that root is P_NONE when the index is totally empty).
     518             :      */
     519        7944 :     previouslevel = InvalidBtreeLevel;
     520        7944 :     current.level = metad->btm_level;
     521        7944 :     current.leftmost = metad->btm_root;
     522        7944 :     current.istruerootlevel = true;
     523       12922 :     while (current.leftmost != P_NONE)
     524             :     {
     525             :         /*
     526             :          * Verify this level, and get left most page for next level down, if
     527             :          * not at leaf level
     528             :          */
     529        5014 :         current = bt_check_level_from_leftmost(state, current);
     530             : 
     531        4978 :         if (current.leftmost == InvalidBlockNumber)
     532           0 :             ereport(ERROR,
     533             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
     534             :                      errmsg("index \"%s\" has no valid pages on level below %u or first level",
     535             :                             RelationGetRelationName(rel), previouslevel)));
     536             : 
     537        4978 :         previouslevel = current.level;
     538             :     }
     539             : 
     540             :     /*
     541             :      * * Check whether heap contains unindexed/malformed tuples *
     542             :      */
     543        7908 :     if (state->heapallindexed)
     544             :     {
     545         128 :         IndexInfo  *indexinfo = BuildIndexInfo(state->rel);
     546             :         TableScanDesc scan;
     547             : 
     548             :         /*
     549             :          * Create our own scan for table_index_build_scan(), rather than
     550             :          * getting it to do so for us.  This is required so that we can
     551             :          * actually use the MVCC snapshot registered earlier.
     552             :          *
     553             :          * Note that table_index_build_scan() calls heap_endscan() for us.
     554             :          */
     555         128 :         scan = table_beginscan_strat(state->heaprel, /* relation */
     556             :                                      state->snapshot,    /* snapshot */
     557             :                                      0, /* number of keys */
     558             :                                      NULL,  /* scan key */
     559             :                                      true,  /* buffer access strategy OK */
     560             :                                      true); /* syncscan OK? */
     561             : 
     562             :         /*
     563             :          * Scan will behave as the first scan of a CREATE INDEX CONCURRENTLY
     564             :          * behaves.
     565             :          *
     566             :          * It's okay that we don't actually use the same lock strength for the
     567             :          * heap relation as any other ii_Concurrent caller would.  We have no
     568             :          * reason to care about a concurrent VACUUM operation, since there
     569             :          * isn't going to be a second scan of the heap that needs to be sure
     570             :          * that there was no concurrent recycling of TIDs.
     571             :          */
     572         124 :         indexinfo->ii_Concurrent = true;
     573             : 
     574             :         /*
     575             :          * Don't wait for uncommitted tuple xact commit/abort when index is a
     576             :          * unique index on a catalog (or an index used by an exclusion
     577             :          * constraint).  This could otherwise happen in the readonly case.
     578             :          */
     579         124 :         indexinfo->ii_Unique = false;
     580         124 :         indexinfo->ii_ExclusionOps = NULL;
     581         124 :         indexinfo->ii_ExclusionProcs = NULL;
     582         124 :         indexinfo->ii_ExclusionStrats = NULL;
     583             : 
     584         124 :         elog(DEBUG1, "verifying that tuples from index \"%s\" are present in \"%s\"",
     585             :              RelationGetRelationName(state->rel),
     586             :              RelationGetRelationName(state->heaprel));
     587             : 
     588         124 :         table_index_build_scan(state->heaprel, state->rel, indexinfo, true, false,
     589             :                                bt_tuple_present_callback, state, scan);
     590             : 
     591         124 :         ereport(DEBUG1,
     592             :                 (errmsg_internal("finished verifying presence of " INT64_FORMAT " tuples from table \"%s\" with bitset %.2f%% set",
     593             :                                  state->heaptuplespresent, RelationGetRelationName(heaprel),
     594             :                                  100.0 * bloom_prop_bits_set(state->filter))));
     595             : 
     596         124 :         bloom_free(state->filter);
     597             :     }
     598             : 
     599             :     /* Be tidy: */
     600        7904 :     if (state->snapshot != InvalidSnapshot)
     601        1366 :         UnregisterSnapshot(state->snapshot);
     602        7904 :     MemoryContextDelete(state->targetcontext);
     603        7904 : }
     604             : 
     605             : /*
     606             :  * Given a left-most block at some level, move right, verifying each page
     607             :  * individually (with more verification across pages for "readonly"
      608             :  * callers).  Callers should pass the true root page as the leftmost initially,
      609             :  * working their way down by passing what is returned by the last call here
      610             :  * until level 0 (leaf page level) is reached.
     611             :  *
     612             :  * Returns state for next call, if any.  This includes left-most block number
     613             :  * one level lower that should be passed on next level/call, which is set to
     614             :  * P_NONE on last call here (when leaf level is verified).  Level numbers
     615             :  * follow the nbtree convention: higher levels have higher numbers, because new
     616             :  * levels are added only due to a root page split.  Note that prior to the
     617             :  * first root page split, the root is also a leaf page, so there is always a
     618             :  * level 0 (leaf level), and it's always the last level processed.
     619             :  *
     620             :  * Note on memory management:  State's per-page context is reset here, between
     621             :  * each call to bt_target_page_check().
     622             :  */
     623             : static BtreeLevel
     624        5014 : bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level)
     625             : {
     626             :     /* State to establish early, concerning entire level */
     627             :     BTPageOpaque opaque;
     628             :     MemoryContext oldcontext;
     629             :     BtreeLevel  nextleveldown;
     630             : 
     631             :     /* Variables for iterating across level using right links */
     632        5014 :     BlockNumber leftcurrent = P_NONE;
     633        5014 :     BlockNumber current = level.leftmost;
     634             : 
     635             :     /* Initialize return state */
     636        5014 :     nextleveldown.leftmost = InvalidBlockNumber;
     637        5014 :     nextleveldown.level = InvalidBtreeLevel;
     638        5014 :     nextleveldown.istruerootlevel = false;
     639             : 
     640             :     /* Use page-level context for duration of this call */
     641        5014 :     oldcontext = MemoryContextSwitchTo(state->targetcontext);
     642             : 
     643        5014 :     elog(DEBUG1, "verifying level %u%s", level.level,
     644             :          level.istruerootlevel ?
     645             :          " (true root level)" : level.level == 0 ? " (leaf level)" : "");
     646             : 
     647        5014 :     state->prevrightlink = InvalidBlockNumber;
     648        5014 :     state->previncompletesplit = false;
     649             : 
     650             :     do
     651             :     {
     652             :         /* Don't rely on CHECK_FOR_INTERRUPTS() calls at lower level */
     653       18242 :         CHECK_FOR_INTERRUPTS();
     654             : 
     655             :         /* Initialize state for this iteration */
     656       18242 :         state->targetblock = current;
     657       18242 :         state->target = palloc_btree_page(state, state->targetblock);
     658       18218 :         state->targetlsn = PageGetLSN(state->target);
     659             : 
     660       18218 :         opaque = BTPageGetOpaque(state->target);
     661             : 
     662       18218 :         if (P_IGNORE(opaque))
     663             :         {
     664             :             /*
     665             :              * Since there cannot be a concurrent VACUUM operation in readonly
     666             :              * mode, and since a page has no links within other pages
     667             :              * (siblings and parent) once it is marked fully deleted, it
     668             :              * should be impossible to land on a fully deleted page in
     669             :              * readonly mode. See bt_child_check() for further details.
     670             :              *
     671             :              * The bt_child_check() P_ISDELETED() check is repeated here so
     672             :              * that pages that are only reachable through sibling links get
     673             :              * checked.
     674             :              */
     675           0 :             if (state->readonly && P_ISDELETED(opaque))
     676           0 :                 ereport(ERROR,
     677             :                         (errcode(ERRCODE_INDEX_CORRUPTED),
     678             :                          errmsg("downlink or sibling link points to deleted block in index \"%s\"",
     679             :                                 RelationGetRelationName(state->rel)),
     680             :                          errdetail_internal("Block=%u left block=%u left link from block=%u.",
     681             :                                             current, leftcurrent, opaque->btpo_prev)));
     682             : 
     683           0 :             if (P_RIGHTMOST(opaque))
     684           0 :                 ereport(ERROR,
     685             :                         (errcode(ERRCODE_INDEX_CORRUPTED),
     686             :                          errmsg("block %u fell off the end of index \"%s\"",
     687             :                                 current, RelationGetRelationName(state->rel))));
     688             :             else
     689           0 :                 ereport(DEBUG1,
     690             :                         (errcode(ERRCODE_NO_DATA),
     691             :                          errmsg_internal("block %u of index \"%s\" concurrently deleted",
     692             :                                          current, RelationGetRelationName(state->rel))));
     693           0 :             goto nextpage;
     694             :         }
     695       18218 :         else if (nextleveldown.leftmost == InvalidBlockNumber)
     696             :         {
     697             :             /*
     698             :              * A concurrent page split could make the caller supplied leftmost
     699             :              * block no longer contain the leftmost page, or no longer be the
     700             :              * true root, but where that isn't possible due to heavyweight
     701             :              * locking, check that the first valid page meets caller's
     702             :              * expectations.
     703             :              */
     704        4990 :             if (state->readonly)
     705             :             {
     706          90 :                 if (!bt_leftmost_ignoring_half_dead(state, current, opaque))
     707           0 :                     ereport(ERROR,
     708             :                             (errcode(ERRCODE_INDEX_CORRUPTED),
     709             :                              errmsg("block %u is not leftmost in index \"%s\"",
     710             :                                     current, RelationGetRelationName(state->rel))));
     711             : 
     712          90 :                 if (level.istruerootlevel && (!P_ISROOT(opaque) && !P_INCOMPLETE_SPLIT(opaque)))
     713           0 :                     ereport(ERROR,
     714             :                             (errcode(ERRCODE_INDEX_CORRUPTED),
     715             :                              errmsg("block %u is not true root in index \"%s\"",
     716             :                                     current, RelationGetRelationName(state->rel))));
     717             :             }
     718             : 
     719             :             /*
     720             :              * Before beginning any non-trivial examination of level, prepare
      721             :              * state for the next bt_check_level_from_leftmost() invocation
      722             :              * for the next level down (if any).
     723             :              *
     724             :              * There should be at least one non-ignorable page per level,
     725             :              * unless this is the leaf level, which is assumed by caller to be
     726             :              * final level.
     727             :              */
     728        4990 :             if (!P_ISLEAF(opaque))
     729             :             {
     730             :                 IndexTuple  itup;
     731             :                 ItemId      itemid;
     732             : 
     733             :                 /* Internal page -- downlink gets leftmost on next level */
     734        1110 :                 itemid = PageGetItemIdCareful(state, state->targetblock,
     735             :                                               state->target,
     736        1110 :                                               P_FIRSTDATAKEY(opaque));
     737        1110 :                 itup = (IndexTuple) PageGetItem(state->target, itemid);
     738        1110 :                 nextleveldown.leftmost = BTreeTupleGetDownLink(itup);
     739        1110 :                 nextleveldown.level = opaque->btpo_level - 1;
     740             :             }
     741             :             else
     742             :             {
     743             :                 /*
     744             :                  * Leaf page -- final level caller must process.
     745             :                  *
     746             :                  * Note that this could also be the root page, if there has
     747             :                  * been no root page split yet.
     748             :                  */
     749        3880 :                 nextleveldown.leftmost = P_NONE;
     750        3880 :                 nextleveldown.level = InvalidBtreeLevel;
     751             :             }
     752             : 
     753             :             /*
     754             :              * Finished setting up state for this call/level.  Control will
     755             :              * never end up back here in any future loop iteration for this
     756             :              * level.
     757             :              */
     758             :         }
     759             : 
     760             :         /*
      761             :          * Sibling links should be in mutual agreement.  The case leftcurrent
      762             :          * == P_NONE && btpo_prev != P_NONE arises when the left sibling of
      763             :          * the parent's low-key downlink is half-dead.  (A half-dead page
     764             :          * has no downlink from its parent.)  Under heavyweight locking, the
     765             :          * last bt_leftmost_ignoring_half_dead() validated this btpo_prev.
     766             :          * Without heavyweight locking, validation of the P_NONE case remains
     767             :          * unimplemented.
     768             :          */
     769       18218 :         if (opaque->btpo_prev != leftcurrent && leftcurrent != P_NONE)
     770           0 :             bt_recheck_sibling_links(state, opaque->btpo_prev, leftcurrent);
     771             : 
     772             :         /* Check level */
     773       18218 :         if (level.level != opaque->btpo_level)
     774           0 :             ereport(ERROR,
     775             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
     776             :                      errmsg("leftmost down link for level points to block in index \"%s\" whose level is not one level down",
     777             :                             RelationGetRelationName(state->rel)),
     778             :                      errdetail_internal("Block pointed to=%u expected level=%u level in pointed to block=%u.",
     779             :                                         current, level.level, opaque->btpo_level)));
     780             : 
     781             :         /* Verify invariants for page */
     782       18218 :         bt_target_page_check(state);
     783             : 
     784       18206 : nextpage:
     785             : 
     786             :         /* Try to detect circular links */
     787       18206 :         if (current == leftcurrent || current == opaque->btpo_prev)
     788           0 :             ereport(ERROR,
     789             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
     790             :                      errmsg("circular link chain found in block %u of index \"%s\"",
     791             :                             current, RelationGetRelationName(state->rel))));
     792             : 
     793       18206 :         leftcurrent = current;
     794       18206 :         current = opaque->btpo_next;
     795             : 
     796       18206 :         if (state->lowkey)
     797             :         {
     798             :             Assert(state->readonly);
     799        3724 :             pfree(state->lowkey);
     800        3724 :             state->lowkey = NULL;
     801             :         }
     802             : 
     803             :         /*
      804             :          * Copy the current target's high key to be the low key of its right
      805             :          * sibling.  Allocate the copy in the upper-level context, so that it
      806             :          * survives the reset of the per-page target context.
     807             :          *
      808             :          * We only need the low key in corner cases of checking child high
      809             :          * keys.  The high key is used only when an incomplete split on the
      810             :          * child level falls on the boundary of pages on the target level.
      811             :          * See bt_child_highkey_check() for details.  So, typically we won't
      812             :          * end up doing anything with the low key, but it's simpler for
      813             :          * general-case high key verification to always have it available.
     814             :          *
      815             :          * The correctness of managing the low key in the presence of
      816             :          * concurrent splits hasn't been investigated yet.  Thankfully we only
      817             :          * need the low key for readonly verification, where splits won't happen.
     818             :          */
     819       18206 :         if (state->readonly && !P_RIGHTMOST(opaque))
     820             :         {
     821             :             IndexTuple  itup;
     822             :             ItemId      itemid;
     823             : 
     824        3724 :             itemid = PageGetItemIdCareful(state, state->targetblock,
     825             :                                           state->target, P_HIKEY);
     826        3724 :             itup = (IndexTuple) PageGetItem(state->target, itemid);
     827             : 
     828        3724 :             state->lowkey = MemoryContextAlloc(oldcontext, IndexTupleSize(itup));
     829        3724 :             memcpy(state->lowkey, itup, IndexTupleSize(itup));
     830             :         }
     831             : 
     832             :         /* Free page and associated memory for this iteration */
     833       18206 :         MemoryContextReset(state->targetcontext);
     834             :     }
     835       18206 :     while (current != P_NONE);
     836             : 
     837        4978 :     if (state->lowkey)
     838             :     {
     839             :         Assert(state->readonly);
     840           0 :         pfree(state->lowkey);
     841           0 :         state->lowkey = NULL;
     842             :     }
     843             : 
     844             :     /* Don't change context for caller */
     845        4978 :     MemoryContextSwitchTo(oldcontext);
     846             : 
     847        4978 :     return nextleveldown;
     848             : }
     849             : 
     850             : /* Check visibility of the table entry referenced by nbtree index */
     851             : static bool
     852         762 : heap_entry_is_visible(BtreeCheckState *state, ItemPointer tid)
     853             : {
     854             :     bool        tid_visible;
     855             : 
     856         762 :     TupleTableSlot *slot = table_slot_create(state->heaprel, NULL);
     857             : 
     858         762 :     tid_visible = table_tuple_fetch_row_version(state->heaprel,
     859             :                                                 tid, state->snapshot, slot);
     860         762 :     if (slot != NULL)
     861         762 :         ExecDropSingleTupleTableSlot(slot);
     862             : 
     863         762 :     return tid_visible;
     864             : }
     865             : 
     866             : /*
      867             :  * Prepare an error message for a unique constraint violation in
      868             :  * a btree index and report ERROR.
     869             :  */
     870             : static void
     871           6 : bt_report_duplicate(BtreeCheckState *state,
     872             :                     BtreeLastVisibleEntry *lVis,
     873             :                     ItemPointer nexttid, BlockNumber nblock, OffsetNumber noffset,
     874             :                     int nposting)
     875             : {
     876             :     char       *htid,
     877             :                *nhtid,
     878             :                *itid,
     879           6 :                *nitid = "",
     880           6 :                *pposting = "",
     881           6 :                *pnposting = "";
     882             : 
     883           6 :     htid = psprintf("tid=(%u,%u)",
     884           6 :                     ItemPointerGetBlockNumberNoCheck(lVis->tid),
     885           6 :                     ItemPointerGetOffsetNumberNoCheck(lVis->tid));
     886           6 :     nhtid = psprintf("tid=(%u,%u)",
     887             :                      ItemPointerGetBlockNumberNoCheck(nexttid),
     888           6 :                      ItemPointerGetOffsetNumberNoCheck(nexttid));
     889           6 :     itid = psprintf("tid=(%u,%u)", lVis->blkno, lVis->offset);
     890             : 
     891           6 :     if (nblock != lVis->blkno || noffset != lVis->offset)
     892           6 :         nitid = psprintf(" tid=(%u,%u)", nblock, noffset);
     893             : 
     894           6 :     if (lVis->postingIndex >= 0)
     895           0 :         pposting = psprintf(" posting %u", lVis->postingIndex);
     896             : 
     897           6 :     if (nposting >= 0)
     898           0 :         pnposting = psprintf(" posting %u", nposting);
     899             : 
     900           6 :     ereport(ERROR,
     901             :             (errcode(ERRCODE_INDEX_CORRUPTED),
     902             :              errmsg("index uniqueness is violated for index \"%s\"",
     903             :                     RelationGetRelationName(state->rel)),
     904             :              errdetail("Index %s%s and%s%s (point to heap %s and %s) page lsn=%X/%08X.",
     905             :                        itid, pposting, nitid, pnposting, htid, nhtid,
     906             :                        LSN_FORMAT_ARGS(state->targetlsn))));
     907             : }
     908             : 
     909             : /* Check if current nbtree leaf entry complies with UNIQUE constraint */
     910             : static void
     911         730 : bt_entry_unique_check(BtreeCheckState *state, IndexTuple itup,
     912             :                       BlockNumber targetblock, OffsetNumber offset,
     913             :                       BtreeLastVisibleEntry *lVis)
     914             : {
     915             :     ItemPointer tid;
     916         730 :     bool        has_visible_entry = false;
     917             : 
     918             :     Assert(targetblock != P_NONE);
     919             : 
     920             :     /*
     921             :      * The current tuple has a posting list.  Report a duplicate if the TID
     922             :      * of any posting list entry is visible and lVis->tid is valid.
     923             :      */
     924         730 :     if (BTreeTupleIsPosting(itup))
     925             :     {
     926          96 :         for (int i = 0; i < BTreeTupleGetNPosting(itup); i++)
     927             :         {
     928          64 :             tid = BTreeTupleGetPostingN(itup, i);
     929          64 :             if (heap_entry_is_visible(state, tid))
     930             :             {
     931          32 :                 has_visible_entry = true;
     932          32 :                 if (ItemPointerIsValid(lVis->tid))
     933             :                 {
     934           0 :                     bt_report_duplicate(state,
     935             :                                         lVis,
     936             :                                         tid, targetblock,
     937             :                                         offset, i);
     938             :                 }
     939             : 
     940             :                 /*
     941             :                  * Prevent reporting the same unique-constraint violation
     942             :                  * twice among the posting list entries of the first tuple on
     943             :                  * the page after the cross-page check.
     944             :                  */
     945          32 :                 if (lVis->blkno != targetblock && ItemPointerIsValid(lVis->tid))
     946           0 :                     return;
     947             : 
     948          32 :                 lVis->blkno = targetblock;
     949          32 :                 lVis->offset = offset;
     950          32 :                 lVis->postingIndex = i;
     951          32 :                 lVis->tid = tid;
     952             :             }
     953             :         }
     954             :     }
     955             : 
     956             :     /*
     957             :      * The current tuple has no posting list.  If its TID is visible, save it
     958             :      * for the following comparisons in the bt_target_page_check() loop.
     959             :      * Report a duplicate if lVis->tid is already valid.
     960             :      */
     961             :     else
     962             :     {
     963         698 :         tid = BTreeTupleGetHeapTID(itup);
     964         698 :         if (heap_entry_is_visible(state, tid))
     965             :         {
     966          30 :             has_visible_entry = true;
     967          30 :             if (ItemPointerIsValid(lVis->tid))
     968             :             {
     969           6 :                 bt_report_duplicate(state,
     970             :                                     lVis,
     971             :                                     tid, targetblock,
     972             :                                     offset, -1);
     973             :             }
     974             : 
     975          24 :             lVis->blkno = targetblock;
     976          24 :             lVis->offset = offset;
     977          24 :             lVis->tid = tid;
     978          24 :             lVis->postingIndex = -1;
     979             :         }
     980             :     }
     981             : 
     982         724 :     if (!has_visible_entry &&
     983         668 :         lVis->blkno != InvalidBlockNumber &&
     984          18 :         lVis->blkno != targetblock)
     985             :     {
     986           0 :         char       *posting = "";
     987             : 
     988           0 :         if (lVis->postingIndex >= 0)
     989           0 :             posting = psprintf(" posting %u", lVis->postingIndex);
     990           0 :         ereport(DEBUG1,
     991             :                 (errcode(ERRCODE_NO_DATA),
     992             :                  errmsg("index uniqueness can not be checked for index tid=(%u,%u) in index \"%s\"",
     993             :                         targetblock, offset,
     994             :                         RelationGetRelationName(state->rel)),
     995             :                  errdetail("It doesn't have visible heap tids and key is equal to the tid=(%u,%u)%s (points to heap tid=(%u,%u)).",
     996             :                            lVis->blkno, lVis->offset, posting,
     997             :                            ItemPointerGetBlockNumberNoCheck(lVis->tid),
     998             :                            ItemPointerGetOffsetNumberNoCheck(lVis->tid)),
     999             :                  errhint("VACUUM the table and repeat the check.")));
    1000             :     }
    1001             : }
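For readers following the lVis bookkeeping above: the BtreeLastVisibleEntry structure is declared earlier in this file and is not shown in this excerpt.  The sketch below is inferred from the field accesses in bt_entry_unique_check() and assumes the usual nbtree typedefs (BlockNumber, OffsetNumber, ItemPointer); consult the declaration near the top of the file for the authoritative definition.

    /* Sketch of the last-visible-entry bookkeeping, inferred from usage above */
    typedef struct BtreeLastVisibleEntry
    {
        BlockNumber  blkno;         /* index block of the last visible entry */
        OffsetNumber offset;        /* offset of that index tuple on the block */
        int          postingIndex;  /* posting-list element, or -1 for a plain tuple */
        ItemPointer  tid;           /* heap TID the entry points to */
    } BtreeLastVisibleEntry;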
    1002             : 
    1003             : /*
    1004             :  * Like P_LEFTMOST(start_opaque), but accept an arbitrarily-long chain of
    1005             :  * half-dead, sibling-linked pages to the left.  If a half-dead page appears
    1006             :  * under state->readonly, the database exited recovery between the first-stage
    1007             :  * and second-stage WAL records of a deletion.
    1008             :  */
    1009             : static bool
    1010         112 : bt_leftmost_ignoring_half_dead(BtreeCheckState *state,
    1011             :                                BlockNumber start,
    1012             :                                BTPageOpaque start_opaque)
    1013             : {
    1014         112 :     BlockNumber reached = start_opaque->btpo_prev,
    1015         112 :                 reached_from = start;
    1016         112 :     bool        all_half_dead = true;
    1017             : 
    1018             :     /*
    1019             :      * To handle the !readonly case, we'd need to accept BTP_DELETED pages and
    1020             :      * potentially observe nbtree/README "Page deletion and backwards scans".
    1021             :      */
    1022             :     Assert(state->readonly);
    1023             : 
    1024         116 :     while (reached != P_NONE && all_half_dead)
    1025             :     {
    1026           4 :         Page        page = palloc_btree_page(state, reached);
    1027           4 :         BTPageOpaque reached_opaque = BTPageGetOpaque(page);
    1028             : 
    1029           4 :         CHECK_FOR_INTERRUPTS();
    1030             : 
    1031             :         /*
    1032             :          * Try to detect btpo_prev circular links.  _bt_unlink_halfdead_page()
    1033             :          * writes that side-links will continue to point to the siblings.
    1034             :          * Check btpo_next for that property.
    1035             :          */
    1036           4 :         all_half_dead = P_ISHALFDEAD(reached_opaque) &&
    1037           4 :             reached != start &&
    1038           8 :             reached != reached_from &&
    1039           4 :             reached_opaque->btpo_next == reached_from;
    1040           4 :         if (all_half_dead)
    1041             :         {
    1042           4 :             XLogRecPtr  pagelsn = PageGetLSN(page);
    1043             : 
    1044             :             /* pagelsn should point to an XLOG_BTREE_MARK_PAGE_HALFDEAD */
    1045           4 :             ereport(DEBUG1,
    1046             :                     (errcode(ERRCODE_NO_DATA),
    1047             :                      errmsg_internal("harmless interrupted page deletion detected in index \"%s\"",
    1048             :                                      RelationGetRelationName(state->rel)),
    1049             :                      errdetail_internal("Block=%u right block=%u page lsn=%X/%08X.",
    1050             :                                         reached, reached_from,
    1051             :                                         LSN_FORMAT_ARGS(pagelsn))));
    1052             : 
    1053           4 :             reached_from = reached;
    1054           4 :             reached = reached_opaque->btpo_prev;
    1055             :         }
    1056             : 
    1057           4 :         pfree(page);
    1058             :     }
    1059             : 
    1060         112 :     return all_half_dead;
    1061             : }
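The loop above only tolerates a left sibling that is half-dead and whose btpo_next still points back at the page the walk came from; anything else, including a circular btpo_prev chain, makes the function answer "not leftmost".  The following standalone sketch (plain C over a made-up page array, not nbtree code) reproduces that walk in isolation:

    /* Standalone sketch (not PostgreSQL code) of the half-dead left-sibling walk */
    #include <stdbool.h>
    #include <stdio.h>

    #define P_NONE 0

    typedef struct
    {
        unsigned prev;
        unsigned next;
        bool     halfdead;
    } FakePage;

    static bool
    leftmost_ignoring_half_dead(FakePage *pages, unsigned start)
    {
        unsigned reached = pages[start].prev;
        unsigned reached_from = start;
        bool     all_half_dead = true;

        while (reached != P_NONE && all_half_dead)
        {
            /* Accept only a half-dead page that still links back to where we came from */
            all_half_dead = pages[reached].halfdead &&
                reached != start &&
                reached != reached_from &&
                pages[reached].next == reached_from;
            if (all_half_dead)
            {
                reached_from = reached;
                reached = pages[reached].prev;
            }
        }
        return all_half_dead;
    }

    int
    main(void)
    {
        /* Block 2 is half-dead and its prev chain ends at P_NONE, so block 3 counts as leftmost */
        FakePage pages[4] = {
            {0}, {0},
            {.prev = P_NONE, .next = 3, .halfdead = true},  /* block 2 */
            {.prev = 2, .next = P_NONE, .halfdead = false}, /* block 3 */
        };

        printf("block 3 effectively leftmost: %s\n",
               leftmost_ignoring_half_dead(pages, 3) ? "yes" : "no");
        return 0;
    }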
    1062             : 
    1063             : /*
    1064             :  * Raise an error when target page's left link does not point back to the
    1065             :  * previous target page, called leftcurrent here.  The leftcurrent page's
    1066             :  * right link was followed to get to the current target page, and we expect
    1067             :  * mutual agreement among leftcurrent and the current target page.  Make sure
    1068             :  * that this condition has definitely been violated in the !readonly case,
    1069             :  * where concurrent page splits are something that we need to deal with.
    1070             :  *
    1071             :  * Cross-page inconsistencies involving pages that don't agree about being
    1072             :  * siblings are known to be a particularly good indicator of corruption
    1073             :  * involving partial writes/lost updates.  The bt_right_page_check_scankey
    1074             :  * check also provides a way of detecting cross-page inconsistencies for
    1075             :  * !readonly callers, but it can only detect sibling pages that have an
    1076             :  * out-of-order keyspace, which can't catch many of the problems that we
    1077             :  * expect to catch here.
    1078             :  *
    1079             :  * The classic example of the kind of inconsistency that we can only catch
    1080             :  * with this check (when in !readonly mode) involves three sibling pages that
    1081             :  * were affected by a faulty page split at some point in the past.  The
    1082             :  * effects of the split are reflected in the original page and its new right
    1083             :  * sibling page, with a lack of any accompanying changes for the _original_
    1084             :  * right sibling page.  The original right sibling page's left link fails to
    1085             :  * point to the new right sibling page (its left link still points to the
    1086             :  * original page), even though the first phase of a page split is supposed to
    1087             :  * work as a single atomic action.  This subtle inconsistency will probably
    1088             :  * only break backwards scans in practice.
    1089             :  *
    1090             :  * Note that this is the only place where amcheck will "couple" buffer locks
    1091             :  * (and only for !readonly callers).  In general we prefer to avoid more
    1092             :  * thorough cross-page checks in !readonly mode, but it seems worth the
    1093             :  * complexity here.  Also, the performance overhead of performing lock
    1094             :  * coupling here is negligible in practice.  Control only reaches here with a
    1095             :  * non-corrupt index when there is a concurrent page split at the instant
    1096             :  * caller crossed over to target page from leftcurrent page.
    1097             :  */
    1098             : static void
    1099           0 : bt_recheck_sibling_links(BtreeCheckState *state,
    1100             :                          BlockNumber btpo_prev_from_target,
    1101             :                          BlockNumber leftcurrent)
    1102             : {
    1103             :     /* passing metapage to BTPageGetOpaque() would give irrelevant findings */
    1104             :     Assert(leftcurrent != P_NONE);
    1105             : 
    1106           0 :     if (!state->readonly)
    1107             :     {
    1108             :         Buffer      lbuf;
    1109             :         Buffer      newtargetbuf;
    1110             :         Page        page;
    1111             :         BTPageOpaque opaque;
    1112             :         BlockNumber newtargetblock;
    1113             : 
    1114             :         /* Couple locks in the usual order for nbtree:  Left to right */
    1115           0 :         lbuf = ReadBufferExtended(state->rel, MAIN_FORKNUM, leftcurrent,
    1116             :                                   RBM_NORMAL, state->checkstrategy);
    1117           0 :         LockBuffer(lbuf, BT_READ);
    1118           0 :         _bt_checkpage(state->rel, lbuf);
    1119           0 :         page = BufferGetPage(lbuf);
    1120           0 :         opaque = BTPageGetOpaque(page);
    1121           0 :         if (P_ISDELETED(opaque))
    1122             :         {
    1123             :             /*
    1124             :              * Cannot reason about concurrently deleted page -- the left link
    1125             :              * in the page to the right is expected to point to some other
    1126             :              * page to the left (not leftcurrent page).
    1127             :              *
    1128             :              * Note that we deliberately don't give up with a half-dead page.
    1129             :              */
    1130           0 :             UnlockReleaseBuffer(lbuf);
    1131           0 :             return;
    1132             :         }
    1133             : 
    1134           0 :         newtargetblock = opaque->btpo_next;
    1135             :         /* Avoid self-deadlock when newtargetblock == leftcurrent */
    1136           0 :         if (newtargetblock != leftcurrent)
    1137             :         {
    1138           0 :             newtargetbuf = ReadBufferExtended(state->rel, MAIN_FORKNUM,
    1139             :                                               newtargetblock, RBM_NORMAL,
    1140             :                                               state->checkstrategy);
    1141           0 :             LockBuffer(newtargetbuf, BT_READ);
    1142           0 :             _bt_checkpage(state->rel, newtargetbuf);
    1143           0 :             page = BufferGetPage(newtargetbuf);
    1144           0 :             opaque = BTPageGetOpaque(page);
    1145             :             /* btpo_prev_from_target may have changed; update it */
    1146           0 :             btpo_prev_from_target = opaque->btpo_prev;
    1147             :         }
    1148             :         else
    1149             :         {
    1150             :             /*
    1151             :              * leftcurrent right sibling points back to leftcurrent block.
    1152             :              * Index is corrupt.  Easiest way to handle this is to pretend
    1153             :              * that we actually read from a distinct page that has an invalid
    1154             :              * block number in its btpo_prev.
    1155             :              */
    1156           0 :             newtargetbuf = InvalidBuffer;
    1157           0 :             btpo_prev_from_target = InvalidBlockNumber;
    1158             :         }
    1159             : 
    1160             :         /*
    1161             :          * No need to check P_ISDELETED here, since new target block cannot be
    1162             :          * marked deleted as long as we hold a lock on lbuf
    1163             :          */
    1164           0 :         if (BufferIsValid(newtargetbuf))
    1165           0 :             UnlockReleaseBuffer(newtargetbuf);
    1166           0 :         UnlockReleaseBuffer(lbuf);
    1167             : 
    1168           0 :         if (btpo_prev_from_target == leftcurrent)
    1169             :         {
    1170             :             /* Report split in left sibling, not target (or new target) */
    1171           0 :             ereport(DEBUG1,
    1172             :                     (errcode(ERRCODE_INTERNAL_ERROR),
    1173             :                      errmsg_internal("harmless concurrent page split detected in index \"%s\"",
    1174             :                                      RelationGetRelationName(state->rel)),
    1175             :                      errdetail_internal("Block=%u new right sibling=%u original right sibling=%u.",
    1176             :                                         leftcurrent, newtargetblock,
    1177             :                                         state->targetblock)));
    1178           0 :             return;
    1179             :         }
    1180             : 
    1181             :         /*
    1182             :          * Index is corrupt.  Make sure that we report correct target page.
    1183             :          *
    1184             :          * This could have changed in cases where there was a concurrent page
    1185             :          * split, as well as index corruption (at least in theory).  Note that
    1186             :          * btpo_prev_from_target was already updated above.
    1187             :          */
    1188           0 :         state->targetblock = newtargetblock;
    1189             :     }
    1190             : 
    1191           0 :     ereport(ERROR,
    1192             :             (errcode(ERRCODE_INDEX_CORRUPTED),
    1193             :              errmsg("left link/right link pair in index \"%s\" not in agreement",
    1194             :                     RelationGetRelationName(state->rel)),
    1195             :              errdetail_internal("Block=%u left block=%u left link from block=%u.",
    1196             :                                 state->targetblock, leftcurrent,
    1197             :                                 btpo_prev_from_target)));
    1198             : }
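The header comment above gives the canonical corruption case: a faulty page split updates the original page and its new right sibling, but leaves the original right sibling's left link pointing at the original page.  Below is a standalone sketch (plain C with a made-up page table, not nbtree code) of that situation and the agreement test that exposes it:

    /* Standalone sketch (not PostgreSQL code) of the left-link/right-link agreement test */
    #include <stdio.h>

    typedef struct
    {
        unsigned prev;
        unsigned next;
    } FakePage;

    int
    main(void)
    {
        FakePage pages[4] = {0};

        /*
         * A faulty split of block 1 created block 3, updating blocks 1 and 3,
         * but the original right sibling (block 2) was never updated: its left
         * link still points at block 1 instead of block 3.
         */
        pages[1].next = 3;
        pages[3].prev = 1;
        pages[3].next = 2;
        pages[2].prev = 1;          /* should have become 3 */

        unsigned leftcurrent = 3;   /* page whose right link we just followed */
        unsigned target = pages[leftcurrent].next;

        if (pages[target].prev != leftcurrent)
            printf("left link/right link pair not in agreement: block=%u left block=%u left link from block=%u\n",
                   target, leftcurrent, pages[target].prev);
        return 0;
    }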
    1199             : 
    1200             : /*
    1201             :  * Function performs the following checks on target page, or pages ancillary to
    1202             :  * target page:
    1203             :  *
    1204             :  * - That every "real" data item is less than or equal to the high key, which
    1205             :  *   is an upper bound on the items on the page.  Data items should be
    1206             :  *   strictly less than the high key when the page is an internal page.
    1207             :  *
    1208             :  * - That within the page, every data item is strictly less than the item
    1209             :  *   immediately to its right, if any (i.e., that the items are in order
    1210             :  *   within the page, so that the binary searches performed by index scans are
    1211             :  *   sane).
    1212             :  *
    1213             :  * - That the last data item stored on the page is strictly less than the
    1214             :  *   first data item on the page to the right (when such a first item is
    1215             :  *   available).
    1216             :  *
    1217             :  * - Various checks on the structure of tuples themselves.  For example, check
    1218             :  *   that non-pivot tuples have no truncated attributes.
    1219             :  *
    1220             :  * - For an index with a unique constraint, make sure that only one of the
    1221             :  *   table entries for equal keys is visible.
    1222             :  *
    1223             :  * Furthermore, when state passed shows ShareLock held, function also checks:
    1224             :  *
    1225             :  * - That all child pages respect strict lower bound from parent's pivot
    1226             :  *   tuple.
    1227             :  *
    1228             :  * - That downlink to block was encountered in parent where that's expected.
    1229             :  *
    1230             :  * - That the high keys of child pages match corresponding pivot keys in parent.
    1231             :  *
    1232             :  * This is also where heapallindexed callers use their Bloom filter to
    1233             :  * fingerprint IndexTuples for later table_index_build_scan() verification.
    1234             :  *
    1235             :  * Note:  Memory allocated in this routine is expected to be released by caller
    1236             :  * resetting state->targetcontext.
    1237             :  */
    1238             : static void
    1239       18218 : bt_target_page_check(BtreeCheckState *state)
    1240             : {
    1241             :     OffsetNumber offset;
    1242             :     OffsetNumber max;
    1243             :     BTPageOpaque topaque;
    1244             : 
    1245             :     /* Last visible entry info for checking indexes with unique constraint */
    1246       18218 :     BtreeLastVisibleEntry lVis = {InvalidBlockNumber, InvalidOffsetNumber, -1, NULL};
    1247             : 
    1248       18218 :     topaque = BTPageGetOpaque(state->target);
    1249       18218 :     max = PageGetMaxOffsetNumber(state->target);
    1250             : 
    1251       18218 :     elog(DEBUG2, "verifying %u items on %s block %u", max,
    1252             :          P_ISLEAF(topaque) ? "leaf" : "internal", state->targetblock);
    1253             : 
    1254             :     /*
    1255             :      * Check the number of attributes in the high key.  Note that the rightmost
    1256             :      * page doesn't contain a high key, so there is nothing to check.
    1257             :      */
    1258       18218 :     if (!P_RIGHTMOST(topaque))
    1259             :     {
    1260             :         ItemId      itemid;
    1261             :         IndexTuple  itup;
    1262             : 
    1263             :         /* Verify line pointer before checking tuple */
    1264       13236 :         itemid = PageGetItemIdCareful(state, state->targetblock,
    1265             :                                       state->target, P_HIKEY);
    1266       13236 :         if (!_bt_check_natts(state->rel, state->heapkeyspace, state->target,
    1267             :                              P_HIKEY))
    1268             :         {
    1269           0 :             itup = (IndexTuple) PageGetItem(state->target, itemid);
    1270           0 :             ereport(ERROR,
    1271             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    1272             :                      errmsg("wrong number of high key index tuple attributes in index \"%s\"",
    1273             :                             RelationGetRelationName(state->rel)),
    1274             :                      errdetail_internal("Index block=%u natts=%u block type=%s page lsn=%X/%08X.",
    1275             :                                         state->targetblock,
    1276             :                                         BTreeTupleGetNAtts(itup, state->rel),
    1277             :                                         P_ISLEAF(topaque) ? "heap" : "index",
    1278             :                                         LSN_FORMAT_ARGS(state->targetlsn))));
    1279             :         }
    1280             :     }
    1281             : 
    1282             :     /*
    1283             :      * Loop over page items, starting from first non-highkey item, not high
    1284             :      * key (if any).  Most tests are not performed for the "negative infinity"
    1285             :      * real item (if any).
    1286             :      */
    1287       18218 :     for (offset = P_FIRSTDATAKEY(topaque);
    1288     4076558 :          offset <= max;
    1289     4058340 :          offset = OffsetNumberNext(offset))
    1290             :     {
    1291             :         ItemId      itemid;
    1292             :         IndexTuple  itup;
    1293             :         size_t      tupsize;
    1294             :         BTScanInsert skey;
    1295             :         bool        lowersizelimit;
    1296             :         ItemPointer scantid;
    1297             : 
    1298             :         /*
    1299             :          * True if we already called bt_entry_unique_check() for the current
    1300             :          * item.  This helps avoid visiting the heap for keys that appear only
    1301             :          * once and therefore can't constitute a unique violation.
    1302             :          */
    1303     4058352 :         bool        unique_checked = false;
    1304             : 
    1305     4058352 :         CHECK_FOR_INTERRUPTS();
    1306             : 
    1307     4058352 :         itemid = PageGetItemIdCareful(state, state->targetblock,
    1308             :                                       state->target, offset);
    1309     4058352 :         itup = (IndexTuple) PageGetItem(state->target, itemid);
    1310     4058352 :         tupsize = IndexTupleSize(itup);
    1311             : 
    1312             :         /*
    1313             :          * lp_len should match the IndexTuple reported length exactly, since
    1314             :          * lp_len is completely redundant in indexes, and both sources of
    1315             :          * tuple length are MAXALIGN()'d.  nbtree does not use lp_len all that
    1316             :          * frequently, and is surprisingly tolerant of corrupt lp_len fields.
    1317             :          */
    1318     4058352 :         if (tupsize != ItemIdGetLength(itemid))
    1319           0 :             ereport(ERROR,
    1320             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    1321             :                      errmsg("index tuple size does not equal lp_len in index \"%s\"",
    1322             :                             RelationGetRelationName(state->rel)),
    1323             :                      errdetail_internal("Index tid=(%u,%u) tuple size=%zu lp_len=%u page lsn=%X/%08X.",
    1324             :                                         state->targetblock, offset,
    1325             :                                         tupsize, ItemIdGetLength(itemid),
    1326             :                                         LSN_FORMAT_ARGS(state->targetlsn)),
    1327             :                      errhint("This could be a torn page problem.")));
    1328             : 
    1329             :         /* Check the number of index tuple attributes */
    1330     4058352 :         if (!_bt_check_natts(state->rel, state->heapkeyspace, state->target,
    1331             :                              offset))
    1332             :         {
    1333             :             ItemPointer tid;
    1334             :             char       *itid,
    1335             :                        *htid;
    1336             : 
    1337           0 :             itid = psprintf("(%u,%u)", state->targetblock, offset);
    1338           0 :             tid = BTreeTupleGetPointsToTID(itup);
    1339           0 :             htid = psprintf("(%u,%u)",
    1340             :                             ItemPointerGetBlockNumberNoCheck(tid),
    1341           0 :                             ItemPointerGetOffsetNumberNoCheck(tid));
    1342             : 
    1343           0 :             ereport(ERROR,
    1344             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    1345             :                      errmsg("wrong number of index tuple attributes in index \"%s\"",
    1346             :                             RelationGetRelationName(state->rel)),
    1347             :                      errdetail_internal("Index tid=%s natts=%u points to %s tid=%s page lsn=%X/%08X.",
    1348             :                                         itid,
    1349             :                                         BTreeTupleGetNAtts(itup, state->rel),
    1350             :                                         P_ISLEAF(topaque) ? "heap" : "index",
    1351             :                                         htid,
    1352             :                                         LSN_FORMAT_ARGS(state->targetlsn))));
    1353             :         }
    1354             : 
    1355             :         /*
    1356             :          * Don't try to generate scankey using "negative infinity" item on
    1357             :          * internal pages. They are always truncated to zero attributes.
    1358             :          */
    1359     4058352 :         if (offset_is_negative_infinity(topaque, offset))
    1360             :         {
    1361             :             /*
    1362             :              * We don't call bt_child_check() for "negative infinity" items.
    1363             :              * But if we're performing downlink connectivity check, we do it
    1364             :              * for every item including "negative infinity" one.
    1365             :              */
    1366        1114 :             if (!P_ISLEAF(topaque) && state->readonly)
    1367             :             {
    1368          24 :                 bt_child_highkey_check(state,
    1369             :                                        offset,
    1370             :                                        NULL,
    1371             :                                        topaque->btpo_level);
    1372             :             }
    1373        1114 :             continue;
    1374             :         }
    1375             : 
    1376             :         /*
    1377             :          * Readonly callers may optionally verify that non-pivot tuples can
    1378             :          * each be found by an independent search that starts from the root.
    1379             :          * Note that we deliberately don't do individual searches for each
    1380             :          * TID, since the posting list itself is validated by other checks.
    1381             :          */
    1382     4057238 :         if (state->rootdescend && P_ISLEAF(topaque) &&
    1383      402196 :             !bt_rootdescend(state, itup))
    1384             :         {
    1385           0 :             ItemPointer tid = BTreeTupleGetPointsToTID(itup);
    1386             :             char       *itid,
    1387             :                        *htid;
    1388             : 
    1389           0 :             itid = psprintf("(%u,%u)", state->targetblock, offset);
    1390           0 :             htid = psprintf("(%u,%u)", ItemPointerGetBlockNumber(tid),
    1391           0 :                             ItemPointerGetOffsetNumber(tid));
    1392             : 
    1393           0 :             ereport(ERROR,
    1394             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    1395             :                      errmsg("could not find tuple using search from root page in index \"%s\"",
    1396             :                             RelationGetRelationName(state->rel)),
    1397             :                      errdetail_internal("Index tid=%s points to heap tid=%s page lsn=%X/%08X.",
    1398             :                                         itid, htid,
    1399             :                                         LSN_FORMAT_ARGS(state->targetlsn))));
    1400             :         }
    1401             : 
    1402             :         /*
    1403             :          * If tuple is a posting list tuple, make sure posting list TIDs are
    1404             :          * in order
    1405             :          */
    1406     4057238 :         if (BTreeTupleIsPosting(itup))
    1407             :         {
    1408             :             ItemPointerData last;
    1409             :             ItemPointer current;
    1410             : 
    1411       22222 :             ItemPointerCopy(BTreeTupleGetHeapTID(itup), &last);
    1412             : 
    1413      161410 :             for (int i = 1; i < BTreeTupleGetNPosting(itup); i++)
    1414             :             {
    1415             : 
    1416      139188 :                 current = BTreeTupleGetPostingN(itup, i);
    1417             : 
    1418      139188 :                 if (ItemPointerCompare(current, &last) <= 0)
    1419             :                 {
    1420           0 :                     char       *itid = psprintf("(%u,%u)", state->targetblock, offset);
    1421             : 
    1422           0 :                     ereport(ERROR,
    1423             :                             (errcode(ERRCODE_INDEX_CORRUPTED),
    1424             :                              errmsg_internal("posting list contains misplaced TID in index \"%s\"",
    1425             :                                              RelationGetRelationName(state->rel)),
    1426             :                              errdetail_internal("Index tid=%s posting list offset=%d page lsn=%X/%08X.",
    1427             :                                                 itid, i,
    1428             :                                                 LSN_FORMAT_ARGS(state->targetlsn))));
    1429             :                 }
    1430             : 
    1431      139188 :                 ItemPointerCopy(current, &last);
    1432             :             }
    1433             :         }
    1434             : 
    1435             :         /* Build insertion scankey for current page offset */
    1436     4057238 :         skey = bt_mkscankey_pivotsearch(state->rel, itup);
    1437             : 
    1438             :         /*
    1439             :          * Make sure tuple size does not exceed the relevant BTREE_VERSION
    1440             :          * specific limit.
    1441             :          *
    1442             :          * BTREE_VERSION 4 (which introduced heapkeyspace rules) requisitioned
    1443             :          * a small amount of space from BTMaxItemSize() in order to ensure
    1444             :          * that suffix truncation always has enough space to add an explicit
    1445             :          * heap TID back to a tuple -- we pessimistically assume that every
    1446             :          * newly inserted tuple will eventually need to have a heap TID
    1447             :          * appended during a future leaf page split, when the tuple becomes
    1448             :          * the basis of the new high key (pivot tuple) for the leaf page.
    1449             :          *
    1450             :          * Since the reclaimed space is reserved for that purpose, we must not
    1451             :          * enforce the slightly lower limit when the extra space has been used
    1452             :          * as intended.  In other words, there is only a cross-version
    1453             :          * difference in the limit on tuple size within leaf pages.
    1454             :          *
    1455             :          * Still, we're particular about the details within BTREE_VERSION 4
    1456             :          * internal pages.  Pivot tuples may only use the extra space for its
    1457             :          * designated purpose.  Enforce the lower limit for pivot tuples when
    1458             :          * an explicit heap TID isn't actually present. (In all other cases
    1459             :          * suffix truncation is guaranteed to generate a pivot tuple that's no
    1460             :          * larger than the firstright tuple provided to it by its caller.)
    1461             :          */
    1462     8114476 :         lowersizelimit = skey->heapkeyspace &&
    1463     4057238 :             (P_ISLEAF(topaque) || BTreeTupleGetHeapTID(itup) == NULL);
    1464     4057238 :         if (tupsize > (lowersizelimit ? BTMaxItemSize : BTMaxItemSizeNoHeapTid))
    1465             :         {
    1466           0 :             ItemPointer tid = BTreeTupleGetPointsToTID(itup);
    1467             :             char       *itid,
    1468             :                        *htid;
    1469             : 
    1470           0 :             itid = psprintf("(%u,%u)", state->targetblock, offset);
    1471           0 :             htid = psprintf("(%u,%u)",
    1472             :                             ItemPointerGetBlockNumberNoCheck(tid),
    1473           0 :                             ItemPointerGetOffsetNumberNoCheck(tid));
    1474             : 
    1475           0 :             ereport(ERROR,
    1476             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    1477             :                      errmsg("index row size %zu exceeds maximum for index \"%s\"",
    1478             :                             tupsize, RelationGetRelationName(state->rel)),
    1479             :                      errdetail_internal("Index tid=%s points to %s tid=%s page lsn=%X/%08X.",
    1480             :                                         itid,
    1481             :                                         P_ISLEAF(topaque) ? "heap" : "index",
    1482             :                                         htid,
    1483             :                                         LSN_FORMAT_ARGS(state->targetlsn))));
    1484             :         }
    1485             : 
    1486             :         /* Fingerprint leaf page tuples (those that point to the heap) */
    1487     4057238 :         if (state->heapallindexed && P_ISLEAF(topaque) && !ItemIdIsDead(itemid))
    1488             :         {
    1489             :             IndexTuple  norm;
    1490             : 
    1491     1014016 :             if (BTreeTupleIsPosting(itup))
    1492             :             {
    1493             :                 /* Fingerprint all elements as distinct "plain" tuples */
    1494       55240 :                 for (int i = 0; i < BTreeTupleGetNPosting(itup); i++)
    1495             :                 {
    1496             :                     IndexTuple  logtuple;
    1497             : 
    1498       54854 :                     logtuple = bt_posting_plain_tuple(itup, i);
    1499       54854 :                     norm = bt_normalize_tuple(state, logtuple);
    1500       54854 :                     bloom_add_element(state->filter, (unsigned char *) norm,
    1501             :                                       IndexTupleSize(norm));
    1502             :                     /* Be tidy */
    1503       54854 :                     if (norm != logtuple)
    1504           4 :                         pfree(norm);
    1505       54854 :                     pfree(logtuple);
    1506             :                 }
    1507             :             }
    1508             :             else
    1509             :             {
    1510     1013630 :                 norm = bt_normalize_tuple(state, itup);
    1511     1013630 :                 bloom_add_element(state->filter, (unsigned char *) norm,
    1512             :                                   IndexTupleSize(norm));
    1513             :                 /* Be tidy */
    1514     1013630 :                 if (norm != itup)
    1515           2 :                     pfree(norm);
    1516             :             }
    1517             :         }
    1518             : 
    1519             :         /*
    1520             :          * * High key check *
    1521             :          *
    1522             :          * If there is a high key (if this is not the rightmost page on its
    1523             :          * entire level), check that high key actually is upper bound on all
    1524             :          * page items.  If this is a posting list tuple, we'll need to set
    1525             :          * scantid to be highest TID in posting list.
    1526             :          *
    1527             :          * We prefer to check all items against high key rather than checking
    1528             :          * just the last and trusting that the operator class obeys the
    1529             :          * transitive law (which implies that all previous items also
    1530             :          * respected the high key invariant if they pass the item order
    1531             :          * check).
    1532             :          *
    1533             :          * Ideally, we'd compare every item in the index against every other
    1534             :          * item in the index, and not trust opclass obedience of the
    1535             :          * transitive law to bridge the gap between children and their
    1536             :          * grandparents (as well as great-grandparents, and so on).  We don't
    1537             :          * go to those lengths because that would be prohibitively expensive,
    1538             :          * and probably not markedly more effective in practice.
    1539             :          *
    1540             :          * On the leaf level, we check that the key is <= the highkey.
    1541             :          * However, on non-leaf levels we check that the key is < the highkey,
    1542             :          * because the high key is "just another separator" rather than a copy
    1543             :          * of some existing key item; we expect it to be unique among all keys
    1544             :          * on the same level.  (Suffix truncation will sometimes produce a
    1545             :          * leaf highkey that is an untruncated copy of the lastleft item, but
    1546             :          * never any other item, which necessitates weakening the leaf level
    1547             :          * check to <=.)
    1548             :          *
    1549             :          * Full explanation for why a highkey is never truly a copy of another
    1550             :          * item from the same level on internal levels:
    1551             :          *
    1552             :          * While the new left page's high key is copied from the first offset
    1553             :          * on the right page during an internal page split, that's not the
    1554             :          * full story.  In effect, internal pages are split in the middle of
    1555             :          * the firstright tuple, not between the would-be lastleft and
    1556             :          * firstright tuples: the firstright key ends up on the left side as
    1557             :          * left's new highkey, and the firstright downlink ends up on the
    1558             :          * right side as right's new "negative infinity" item.  The negative
    1559             :          * infinity tuple is truncated to zero attributes, so we're only left
    1560             :          * with the downlink.  In other words, the copying is just an
    1561             :          * implementation detail of splitting in the middle of a (pivot)
    1562             :          * tuple. (See also: "Notes About Data Representation" in the nbtree
    1563             :          * README.)
    1564             :          */
    1565     4057238 :         scantid = skey->scantid;
    1566     4057238 :         if (state->heapkeyspace && BTreeTupleIsPosting(itup))
    1567       22222 :             skey->scantid = BTreeTupleGetMaxHeapTID(itup);
    1568             : 
    1569     7756652 :         if (!P_RIGHTMOST(topaque) &&
    1570     3699414 :             !(P_ISLEAF(topaque) ? invariant_leq_offset(state, skey, P_HIKEY) :
    1571        1132 :               invariant_l_offset(state, skey, P_HIKEY)))
    1572             :         {
    1573           0 :             ItemPointer tid = BTreeTupleGetPointsToTID(itup);
    1574             :             char       *itid,
    1575             :                        *htid;
    1576             : 
    1577           0 :             itid = psprintf("(%u,%u)", state->targetblock, offset);
    1578           0 :             htid = psprintf("(%u,%u)",
    1579             :                             ItemPointerGetBlockNumberNoCheck(tid),
    1580           0 :                             ItemPointerGetOffsetNumberNoCheck(tid));
    1581             : 
    1582           0 :             ereport(ERROR,
    1583             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    1584             :                      errmsg("high key invariant violated for index \"%s\"",
    1585             :                             RelationGetRelationName(state->rel)),
    1586             :                      errdetail_internal("Index tid=%s points to %s tid=%s page lsn=%X/%08X.",
    1587             :                                         itid,
    1588             :                                         P_ISLEAF(topaque) ? "heap" : "index",
    1589             :                                         htid,
    1590             :                                         LSN_FORMAT_ARGS(state->targetlsn))));
    1591             :         }
    1592             :         /* Reset, in case scantid was set to (itup) posting tuple's max TID */
    1593     4057238 :         skey->scantid = scantid;
    1594             : 
    1595             :         /*
    1596             :          * * Item order check *
    1597             :          *
    1598             :          * Check that items are stored on page in logical order, by checking
    1599             :          * current item is strictly less than next item (if any).
    1600             :          */
    1601     4057238 :         if (OffsetNumberNext(offset) <= max &&
    1602     4039036 :             !invariant_l_offset(state, skey, OffsetNumberNext(offset)))
    1603             :         {
    1604             :             ItemPointer tid;
    1605             :             char       *itid,
    1606             :                        *htid,
    1607             :                        *nitid,
    1608             :                        *nhtid;
    1609             : 
    1610           6 :             itid = psprintf("(%u,%u)", state->targetblock, offset);
    1611           6 :             tid = BTreeTupleGetPointsToTID(itup);
    1612           6 :             htid = psprintf("(%u,%u)",
    1613             :                             ItemPointerGetBlockNumberNoCheck(tid),
    1614           6 :                             ItemPointerGetOffsetNumberNoCheck(tid));
    1615           6 :             nitid = psprintf("(%u,%u)", state->targetblock,
    1616           6 :                              OffsetNumberNext(offset));
    1617             : 
    1618             :             /* Reuse itup to get pointed-to heap location of second item */
    1619           6 :             itemid = PageGetItemIdCareful(state, state->targetblock,
    1620             :                                           state->target,
    1621           6 :                                           OffsetNumberNext(offset));
    1622           6 :             itup = (IndexTuple) PageGetItem(state->target, itemid);
    1623           6 :             tid = BTreeTupleGetPointsToTID(itup);
    1624           6 :             nhtid = psprintf("(%u,%u)",
    1625             :                              ItemPointerGetBlockNumberNoCheck(tid),
    1626           6 :                              ItemPointerGetOffsetNumberNoCheck(tid));
    1627             : 
    1628           6 :             ereport(ERROR,
    1629             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    1630             :                      errmsg("item order invariant violated for index \"%s\"",
    1631             :                             RelationGetRelationName(state->rel)),
    1632             :                      errdetail_internal("Lower index tid=%s (points to %s tid=%s) higher index tid=%s (points to %s tid=%s) page lsn=%X/%08X.",
    1633             :                                         itid,
    1634             :                                         P_ISLEAF(topaque) ? "heap" : "index",
    1635             :                                         htid,
    1636             :                                         nitid,
    1637             :                                         P_ISLEAF(topaque) ? "heap" : "index",
    1638             :                                         nhtid,
    1639             :                                         LSN_FORMAT_ARGS(state->targetlsn))));
    1640             :         }
    1641             : 
    1642             :         /*
    1643             :          * If the index is unique, verify entry uniqueness by checking heap
    1644             :          * tuple visibility.  Immediately check posting tuples and tuples with
    1645             :          * repeated keys.  Postpone the check for keys that appear for the
    1646             :          * first time.
    1647             :          */
    1648     4057232 :         if (state->checkunique && state->indexinfo->ii_Unique &&
    1649      582758 :             P_ISLEAF(topaque) && !skey->anynullkeys &&
    1650      581536 :             (BTreeTupleIsPosting(itup) || ItemPointerIsValid(lVis.tid)))
    1651             :         {
    1652          56 :             bt_entry_unique_check(state, itup, state->targetblock, offset,
    1653             :                                   &lVis);
    1654          50 :             unique_checked = true;
    1655             :         }
    1656             : 
    1657     4057226 :         if (state->checkunique && state->indexinfo->ii_Unique &&
    1658      291968 :             P_ISLEAF(topaque) && OffsetNumberNext(offset) <= max)
    1659             :         {
    1660             :             /* Save current scankey tid */
    1661      289336 :             scantid = skey->scantid;
    1662             : 
    1663             :              * Invalidate the scankey's tid so that _bt_compare compares only
    1664             :              * the keys in the item, reporting equality even if heap TIDs differ.
    1665             :              * the item to report equality even if heap TIDs are different
    1666             :              */
    1667      289336 :             skey->scantid = NULL;
    1668             : 
    1669             :             /*
    1670             :              * If next key tuple is different, invalidate last visible entry
    1671             :              * If the next key tuple is different, invalidate the last visible
    1672             :              * entry data (whole index tuple or last posting in index tuple).
    1673             :              * A key containing a null value does not violate the unique
    1674             :              * constraint and is treated as different from any other key.
    1675             :              * If the next key is the same as the previous one, do the
    1676             :              * bt_entry_unique_check() call if it was postponed.
    1677             :              */
    1678      289336 :             if (_bt_compare(state->rel, skey, state->target,
    1679      290150 :                             OffsetNumberNext(offset)) != 0 || skey->anynullkeys)
    1680             :             {
    1681      288662 :                 lVis.blkno = InvalidBlockNumber;
    1682      288662 :                 lVis.offset = InvalidOffsetNumber;
    1683      288662 :                 lVis.postingIndex = -1;
    1684      288662 :                 lVis.tid = NULL;
    1685             :             }
    1686         674 :             else if (!unique_checked)
    1687             :             {
    1688         674 :                 bt_entry_unique_check(state, itup, state->targetblock, offset,
    1689             :                                       &lVis);
    1690             :             }
    1691      289336 :             skey->scantid = scantid; /* Restore saved scan key state */
    1692             :         }
    1693             : 
    1694             :         /*
    1695             :          * * Last item check *
    1696             :          *
    1697             :          * Check the last item against the next/right page's first data item
    1698             :          * when the last item on the page is reached.  This additional check detects
    1699             :          * transposed pages iff the supposed right sibling page happens to
    1700             :          * belong before target in the key space.  (Otherwise, a subsequent
    1701             :          * heap verification will probably detect the problem.)
    1702             :          *
    1703             :          * This check is similar to the item order check that will have
    1704             :          * already been performed for every other "real" item on target page
    1705             :          * when last item is checked.  The difference is that the next item
    1706             :          * (the item that is compared to target's last item) needs to come
    1707             :          * from the next/sibling page.  There may not be such an item
    1708             :          * available from sibling for various reasons, though (e.g., target is
    1709             :          * the rightmost page on level).
    1710             :          */
    1711     4057226 :         if (offset == max)
    1712             :         {
    1713             :             BTScanInsert rightkey;
    1714             : 
    1715             :             /* first offset on a right index page (log only) */
    1716       18202 :             OffsetNumber rightfirstoffset = InvalidOffsetNumber;
    1717             : 
    1718             :             /* Get item in next/right page */
    1719       18202 :             rightkey = bt_right_page_check_scankey(state, &rightfirstoffset);
    1720             : 
    1721       18202 :             if (rightkey &&
    1722       13228 :                 !invariant_g_offset(state, rightkey, max))
    1723             :             {
    1724             :                 /*
    1725             :                  * As explained at length in bt_right_page_check_scankey(),
    1726             :                  * there is a known !readonly race that could account for
    1727             :                  * apparent violation of invariant, which we must check for
    1728             :                  * before actually proceeding with raising error.  Our canary
    1729             :                  * condition is that target page was deleted.
    1730             :                  */
    1731           0 :                 if (!state->readonly)
    1732             :                 {
    1733             :                     /* Get fresh copy of target page */
    1734           0 :                     state->target = palloc_btree_page(state, state->targetblock);
    1735             :                     /* Note that we deliberately do not update target LSN */
    1736           0 :                     topaque = BTPageGetOpaque(state->target);
    1737             : 
    1738             :                     /*
    1739             :                      * All !readonly checks now performed; just return
    1740             :                      */
    1741           0 :                     if (P_IGNORE(topaque))
    1742           0 :                         return;
    1743             :                 }
    1744             : 
    1745           0 :                 ereport(ERROR,
    1746             :                         (errcode(ERRCODE_INDEX_CORRUPTED),
    1747             :                          errmsg("cross page item order invariant violated for index \"%s\"",
    1748             :                                 RelationGetRelationName(state->rel)),
    1749             :                          errdetail_internal("Last item on page tid=(%u,%u) page lsn=%X/%08X.",
    1750             :                                             state->targetblock, offset,
    1751             :                                             LSN_FORMAT_ARGS(state->targetlsn))));
    1752             :             }
    1753             : 
    1754             :             /*
     1755             :              * If the index has a unique constraint, make sure that no more
     1756             :              * than one of the equal items found is visible.
    1757             :              */
    1758       18202 :             if (state->checkunique && state->indexinfo->ii_Unique &&
    1759        1026 :                 rightkey && P_ISLEAF(topaque) && !P_RIGHTMOST(topaque))
    1760             :             {
    1761        1026 :                 BlockNumber rightblock_number = topaque->btpo_next;
    1762             : 
    1763        1026 :                 elog(DEBUG2, "check cross page unique condition");
    1764             : 
    1765             :                 /*
    1766             :                  * Make _bt_compare compare only index keys without heap TIDs.
     1767             :                  * rightkey->scantid is modified destructively, but that is
     1768             :                  * okay because it is not used later.
    1769             :                  */
    1770        1026 :                 rightkey->scantid = NULL;
    1771             : 
    1772             :                 /* The first key on the next page is the same */
    1773        1026 :                 if (_bt_compare(state->rel, rightkey, state->target, max) == 0 &&
    1774          14 :                     !rightkey->anynullkeys)
    1775             :                 {
    1776             :                     Page        rightpage;
    1777             : 
    1778             :                     /*
    1779             :                      * Do the bt_entry_unique_check() call if it was
    1780             :                      * postponed.
    1781             :                      */
    1782           0 :                     if (!unique_checked)
    1783           0 :                         bt_entry_unique_check(state, itup, state->targetblock,
    1784             :                                               offset, &lVis);
    1785             : 
    1786           0 :                     elog(DEBUG2, "cross page equal keys");
    1787           0 :                     rightpage = palloc_btree_page(state,
    1788             :                                                   rightblock_number);
    1789           0 :                     topaque = BTPageGetOpaque(rightpage);
    1790             : 
    1791           0 :                     if (P_IGNORE(topaque))
    1792             :                     {
    1793           0 :                         pfree(rightpage);
    1794           0 :                         break;
    1795             :                     }
    1796             : 
    1797           0 :                     if (unlikely(!P_ISLEAF(topaque)))
    1798           0 :                         ereport(ERROR,
    1799             :                                 (errcode(ERRCODE_INDEX_CORRUPTED),
    1800             :                                  errmsg("right block of leaf block is non-leaf for index \"%s\"",
    1801             :                                         RelationGetRelationName(state->rel)),
    1802             :                                  errdetail_internal("Block=%u page lsn=%X/%08X.",
    1803             :                                                     state->targetblock,
    1804             :                                                     LSN_FORMAT_ARGS(state->targetlsn))));
    1805             : 
    1806           0 :                     itemid = PageGetItemIdCareful(state, rightblock_number,
    1807             :                                                   rightpage,
    1808             :                                                   rightfirstoffset);
    1809           0 :                     itup = (IndexTuple) PageGetItem(rightpage, itemid);
    1810             : 
    1811           0 :                     bt_entry_unique_check(state, itup, rightblock_number, rightfirstoffset, &lVis);
    1812             : 
    1813           0 :                     pfree(rightpage);
    1814             :                 }
    1815             :             }
    1816             :         }
    1817             : 
    1818             :         /*
    1819             :          * * Downlink check *
    1820             :          *
    1821             :          * Additional check of child items iff this is an internal page and
    1822             :          * caller holds a ShareLock.  This happens for every downlink (item)
    1823             :          * in target excluding the negative-infinity downlink (again, this is
    1824             :          * because it has no useful value to compare).
    1825             :          */
    1826     4057226 :         if (!P_ISLEAF(topaque) && state->readonly)
    1827        3722 :             bt_child_check(state, skey, offset);
    1828             :     }
    1829             : 
    1830             :     /*
    1831             :      * Special case bt_child_highkey_check() call
    1832             :      *
     1833             :      * We don't pass a real downlink, but we have to finish processing the
     1834             :      * level.  If the condition is satisfied, we've already processed all the
     1835             :      * downlinks from the target level.  But there still might be pages to the
     1836             :      * right of the child page pointed to by our rightmost downlink, and they
     1837             :      * might have missing downlinks.  This final call checks for them.
    1838             :      */
    1839       18206 :     if (!P_ISLEAF(topaque) && P_RIGHTMOST(topaque) && state->readonly)
    1840             :     {
    1841          22 :         bt_child_highkey_check(state, InvalidOffsetNumber,
    1842             :                                NULL, topaque->btpo_level);
    1843             :     }
    1844             : }
    1845             : 
    1846             : /*
    1847             :  * Return a scankey for an item on page to right of current target (or the
    1848             :  * first non-ignorable page), sufficient to check ordering invariant on last
    1849             :  * item in current target page.  Returned scankey relies on local memory
    1850             :  * allocated for the child page, which caller cannot pfree().  Caller's memory
    1851             :  * context should be reset between calls here.
    1852             :  *
    1853             :  * This is the first data item, and so all adjacent items are checked against
    1854             :  * their immediate sibling item (which may be on a sibling page, or even a
     1855             :  * "cousin" page at parent boundaries where target's rightlink points to a
     1856             :  * page with a different parent).  If no such valid item is available, return
    1857             :  * NULL instead.
    1858             :  *
    1859             :  * Note that !readonly callers must reverify that target page has not
    1860             :  * been concurrently deleted.
    1861             :  *
    1862             :  * Save rightfirstoffset for detailed error message.
    1863             :  */
    1864             : static BTScanInsert
    1865       18202 : bt_right_page_check_scankey(BtreeCheckState *state, OffsetNumber *rightfirstoffset)
    1866             : {
    1867             :     BTPageOpaque opaque;
    1868             :     ItemId      rightitem;
    1869             :     IndexTuple  firstitup;
    1870             :     BlockNumber targetnext;
    1871             :     Page        rightpage;
    1872             :     OffsetNumber nline;
    1873             : 
    1874             :     /* Determine target's next block number */
    1875       18202 :     opaque = BTPageGetOpaque(state->target);
    1876             : 
    1877             :     /* If target is already rightmost, no right sibling; nothing to do here */
    1878       18202 :     if (P_RIGHTMOST(opaque))
    1879        4974 :         return NULL;
    1880             : 
    1881             :     /*
    1882             :      * General notes on concurrent page splits and page deletion:
    1883             :      *
    1884             :      * Routines like _bt_search() don't require *any* page split interlock
    1885             :      * when descending the tree, including something very light like a buffer
    1886             :      * pin. That's why it's okay that we don't either.  This avoidance of any
    1887             :      * need to "couple" buffer locks is the raison d' etre of the Lehman & Yao
     1888             :      * need to "couple" buffer locks is the raison d'être of the Lehman & Yao
    1889             :      *
    1890             :      * That leaves deletion.  A deleted page won't actually be recycled by
    1891             :      * VACUUM early enough for us to fail to at least follow its right link
    1892             :      * (or left link, or downlink) and find its sibling, because recycling
    1893             :      * does not occur until no possible index scan could land on the page.
    1894             :      * Index scans can follow links with nothing more than their snapshot as
    1895             :      * an interlock and be sure of at least that much.  (See page
    1896             :      * recycling/"visible to everyone" notes in nbtree README.)
    1897             :      *
    1898             :      * Furthermore, it's okay if we follow a rightlink and find a half-dead or
    1899             :      * dead (ignorable) page one or more times.  There will either be a
    1900             :      * further right link to follow that leads to a live page before too long
    1901             :      * (before passing by parent's rightmost child), or we will find the end
    1902             :      * of the entire level instead (possible when parent page is itself the
    1903             :      * rightmost on its level).
    1904             :      */
    1905       13228 :     targetnext = opaque->btpo_next;
    1906             :     for (;;)
    1907             :     {
    1908       13228 :         CHECK_FOR_INTERRUPTS();
    1909             : 
    1910       13228 :         rightpage = palloc_btree_page(state, targetnext);
    1911       13228 :         opaque = BTPageGetOpaque(rightpage);
    1912             : 
    1913       13228 :         if (!P_IGNORE(opaque) || P_RIGHTMOST(opaque))
    1914             :             break;
    1915             : 
    1916             :         /*
    1917             :          * We landed on a deleted or half-dead sibling page.  Step right until
    1918             :          * we locate a live sibling page.
    1919             :          */
    1920           0 :         ereport(DEBUG2,
    1921             :                 (errcode(ERRCODE_NO_DATA),
    1922             :                  errmsg_internal("level %u sibling page in block %u of index \"%s\" was found deleted or half dead",
    1923             :                                  opaque->btpo_level, targetnext, RelationGetRelationName(state->rel)),
    1924             :                  errdetail_internal("Deleted page found when building scankey from right sibling.")));
    1925             : 
    1926           0 :         targetnext = opaque->btpo_next;
    1927             : 
    1928             :         /* Be slightly more pro-active in freeing this memory, just in case */
    1929           0 :         pfree(rightpage);
    1930             :     }
    1931             : 
    1932             :     /*
    1933             :      * No ShareLock held case -- why it's safe to proceed.
    1934             :      *
    1935             :      * Problem:
    1936             :      *
    1937             :      * We must avoid false positive reports of corruption when caller treats
    1938             :      * item returned here as an upper bound on target's last item.  In
    1939             :      * general, false positives are disallowed.  Avoiding them here when
    1940             :      * caller is !readonly is subtle.
    1941             :      *
    1942             :      * A concurrent page deletion by VACUUM of the target page can result in
    1943             :      * the insertion of items on to this right sibling page that would
    1944             :      * previously have been inserted on our target page.  There might have
    1945             :      * been insertions that followed the target's downlink after it was made
    1946             :      * to point to right sibling instead of target by page deletion's first
    1947             :      * phase. The inserters insert items that would belong on target page.
    1948             :      * This race is very tight, but it's possible.  This is our only problem.
    1949             :      *
    1950             :      * Non-problems:
    1951             :      *
    1952             :      * We are not hindered by a concurrent page split of the target; we'll
    1953             :      * never land on the second half of the page anyway.  A concurrent split
    1954             :      * of the right page will also not matter, because the first data item
    1955             :      * remains the same within the left half, which we'll reliably land on. If
    1956             :      * we had to skip over ignorable/deleted pages, it cannot matter because
    1957             :      * their key space has already been atomically merged with the first
    1958             :      * non-ignorable page we eventually find (doesn't matter whether the page
    1959             :      * we eventually find is a true sibling or a cousin of target, which we go
    1960             :      * into below).
    1961             :      *
    1962             :      * Solution:
    1963             :      *
    1964             :      * Caller knows that it should reverify that target is not ignorable
    1965             :      * (half-dead or deleted) when cross-page sibling item comparison appears
    1966             :      * to indicate corruption (invariant fails).  This detects the single race
    1967             :      * condition that exists for caller.  This is correct because the
    1968             :      * continued existence of target block as non-ignorable (not half-dead or
    1969             :      * deleted) implies that target page was not merged into from the right by
    1970             :      * deletion; the key space at or after target never moved left.  Target's
    1971             :      * parent either has the same downlink to target as before, or a <
    1972             :      * downlink due to deletion at the left of target.  Target either has the
    1973             :      * same highkey as before, or a highkey < before when there is a page
    1974             :      * split. (The rightmost concurrently-split-from-target-page page will
    1975             :      * still have the same highkey as target was originally found to have,
    1976             :      * which for our purposes is equivalent to target's highkey itself never
    1977             :      * changing, since we reliably skip over
    1978             :      * concurrently-split-from-target-page pages.)
    1979             :      *
    1980             :      * In simpler terms, we allow that the key space of the target may expand
    1981             :      * left (the key space can move left on the left side of target only), but
    1982             :      * the target key space cannot expand right and get ahead of us without
    1983             :      * our detecting it.  The key space of the target cannot shrink, unless it
    1984             :      * shrinks to zero due to the deletion of the original page, our canary
    1985             :      * condition.  (To be very precise, we're a bit stricter than that because
    1986             :      * it might just have been that the target page split and only the
    1987             :      * original target page was deleted.  We can be more strict, just not more
    1988             :      * lax.)
    1989             :      *
    1990             :      * Top level tree walk caller moves on to next page (makes it the new
    1991             :      * target) following recovery from this race.  (cf.  The rationale for
    1992             :      * child/downlink verification needing a ShareLock within
    1993             :      * bt_child_check(), where page deletion is also the main source of
    1994             :      * trouble.)
    1995             :      *
    1996             :      * Note that it doesn't matter if right sibling page here is actually a
    1997             :      * cousin page, because in order for the key space to be readjusted in a
    1998             :      * way that causes us issues in next level up (guiding problematic
    1999             :      * concurrent insertions to the cousin from the grandparent rather than to
    2000             :      * the sibling from the parent), there'd have to be page deletion of
    2001             :      * target's parent page (affecting target's parent's downlink in target's
    2002             :      * grandparent page).  Internal page deletion only occurs when there are
    2003             :      * no child pages (they were all fully deleted), and caller is checking
    2004             :      * that the target's parent has at least one non-deleted (so
    2005             :      * non-ignorable) child: the target page.  (Note that the first phase of
    2006             :      * deletion atomically marks the page to be deleted half-dead/ignorable at
    2007             :      * the same time downlink in its parent is removed, so caller will
    2008             :      * definitely not fail to detect that this happened.)
    2009             :      *
    2010             :      * This trick is inspired by the method backward scans use for dealing
    2011             :      * with concurrent page splits; concurrent page deletion is a problem that
    2012             :      * similarly receives special consideration sometimes (it's possible that
    2013             :      * the backwards scan will re-read its "original" block after failing to
    2014             :      * find a right-link to it, having already moved in the opposite direction
    2015             :      * (right/"forwards") a few times to try to locate one).  Just like us,
    2016             :      * that happens only to determine if there was a concurrent page deletion
    2017             :      * of a reference page, and just like us if there was a page deletion of
    2018             :      * that reference page it means we can move on from caring about the
    2019             :      * reference page.  See the nbtree README for a full description of how
    2020             :      * that works.
    2021             :      */
    2022       13228 :     nline = PageGetMaxOffsetNumber(rightpage);
    2023             : 
    2024             :     /*
    2025             :      * Get first data item, if any
    2026             :      */
    2027       13228 :     if (P_ISLEAF(opaque) && nline >= P_FIRSTDATAKEY(opaque))
    2028             :     {
    2029             :         /* Return first data item (if any) */
    2030       13224 :         rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
    2031       13224 :                                          P_FIRSTDATAKEY(opaque));
    2032       13224 :         *rightfirstoffset = P_FIRSTDATAKEY(opaque);
    2033             :     }
    2034           8 :     else if (!P_ISLEAF(opaque) &&
    2035           4 :              nline >= OffsetNumberNext(P_FIRSTDATAKEY(opaque)))
    2036             :     {
    2037             :         /*
    2038             :          * Return first item after the internal page's "negative infinity"
    2039             :          * item
    2040             :          */
    2041           4 :         rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
    2042           4 :                                          OffsetNumberNext(P_FIRSTDATAKEY(opaque)));
    2043             :     }
    2044             :     else
    2045             :     {
    2046             :         /*
    2047             :          * No first item.  Page is probably empty leaf page, but it's also
    2048             :          * possible that it's an internal page with only a negative infinity
    2049             :          * item.
    2050             :          */
    2051           0 :         ereport(DEBUG2,
    2052             :                 (errcode(ERRCODE_NO_DATA),
    2053             :                  errmsg_internal("%s block %u of index \"%s\" has no first data item",
    2054             :                                  P_ISLEAF(opaque) ? "leaf" : "internal", targetnext,
    2055             :                                  RelationGetRelationName(state->rel))));
    2056           0 :         return NULL;
    2057             :     }
    2058             : 
    2059             :     /*
    2060             :      * Return first real item scankey.  Note that this relies on right page
    2061             :      * memory remaining allocated.
    2062             :      */
    2063       13228 :     firstitup = (IndexTuple) PageGetItem(rightpage, rightitem);
    2064       13228 :     return bt_mkscankey_pivotsearch(state->rel, firstitup);
    2065             : }
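                      : 
                      : /*
                      :  * Editor's sketch (added, hypothetical): the caller-side pattern the header
                      :  * comment above assumes.  The returned scankey points into the palloc'd copy
                      :  * of the right sibling page, so the caller never pfree()s it directly; it
                      :  * instead resets its per-target memory context between target pages (assumed
                      :  * here to be a field named state->targetcontext):
                      :  *
                      :  *     MemoryContextReset(state->targetcontext);   // once per target page
                      :  *     ...
                      :  *     rightkey = bt_right_page_check_scankey(state, &rightfirstoffset);
                      :  *     if (rightkey != NULL && !invariant_g_offset(state, rightkey, max))
                      :  *         ... recheck the target page, then report corruption ...
                      :  */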
    2066             : 
    2067             : /*
     2068             :  * Check if two tuples are binary identical except for the block number.  So,
     2069             :  * this function is capable of comparing pivot keys on different levels.
    2070             :  */
    2071             : static bool
    2072        3724 : bt_pivot_tuple_identical(bool heapkeyspace, IndexTuple itup1, IndexTuple itup2)
    2073             : {
    2074        3724 :     if (IndexTupleSize(itup1) != IndexTupleSize(itup2))
    2075           0 :         return false;
    2076             : 
    2077        3724 :     if (heapkeyspace)
    2078             :     {
    2079             :         /*
    2080             :          * Offset number will contain important information in heapkeyspace
    2081             :          * indexes: the number of attributes left in the pivot tuple following
    2082             :          * suffix truncation.  Don't skip over it (compare it too).
    2083             :          */
    2084        3724 :         if (memcmp(&itup1->t_tid.ip_posid, &itup2->t_tid.ip_posid,
    2085        3724 :                    IndexTupleSize(itup1) -
    2086             :                    offsetof(ItemPointerData, ip_posid)) != 0)
    2087           0 :             return false;
    2088             :     }
    2089             :     else
    2090             :     {
    2091             :         /*
    2092             :          * Cannot rely on offset number field having consistent value across
    2093             :          * levels on pg_upgrade'd !heapkeyspace indexes.  Compare contents of
    2094             :          * tuple starting from just after item pointer (i.e. after block
    2095             :          * number and offset number).
    2096             :          */
    2097           0 :         if (memcmp(&itup1->t_info, &itup2->t_info,
    2098           0 :                    IndexTupleSize(itup1) -
    2099             :                    offsetof(IndexTupleData, t_info)) != 0)
    2100           0 :             return false;
    2101             :     }
    2102             : 
    2103        3724 :     return true;
    2104             : }
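                      : 
                      : /*
                      :  * Editor's note (added commentary): why the comparisons above can start
                      :  * mid-struct.  An IndexTuple begins with an ItemPointerData t_tid, whose
                      :  * 4-byte block number (ip_blkid) precedes the 2-byte offset number
                      :  * (ip_posid), followed by t_info and then the key data.  Starting the
                      :  * heapkeyspace memcmp at offsetof(ItemPointerData, ip_posid) therefore skips
                      :  * exactly the block number, while the !heapkeyspace variant starts at t_info
                      :  * and skips the offset number as well.
                      :  */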
    2105             : 
    2106             : /*---
    2107             :  * Check high keys on the child level.  Traverse rightlinks from previous
    2108             :  * downlink to the current one.  Check that there are no intermediate pages
    2109             :  * with missing downlinks.
    2110             :  *
    2111             :  * If 'loaded_child' is given, it's assumed to be the page pointed to by the
    2112             :  * downlink referenced by 'downlinkoffnum' of the target page.
    2113             :  *
    2114             :  * Basically this function is called for each target downlink and checks two
    2115             :  * invariants:
    2116             :  *
     2117             :  * 1) You can reach the next child from the previous one via rightlinks;
     2118             :  * 2) Each child's high key has a matching pivot key on the target level.
    2119             :  *
    2120             :  * Consider the sample tree picture.
    2121             :  *
    2122             :  *               1
    2123             :  *           /       \
    2124             :  *        2     <->     3
    2125             :  *      /   \        /     \
    2126             :  *    4  <>  5  <> 6 <> 7 <> 8
    2127             :  *
    2128             :  * This function will be called for blocks 4, 5, 6 and 8.  Consider what is
    2129             :  * happening for each function call.
    2130             :  *
    2131             :  * - The function call for block 4 initializes data structure and matches high
    2132             :  *   key of block 4 to downlink's pivot key of block 2.
    2133             :  * - The high key of block 5 is matched to the high key of block 2.
     2134             :  * - Block 6 has the incomplete-split flag set, so its high key isn't
     2135             :  *   matched to anything.
    2136             :  * - The function call for block 8 checks that block 8 can be found while
    2137             :  *   following rightlinks from block 6.  The high key of block 7 will be
    2138             :  *   matched to downlink's pivot key in block 3.
    2139             :  *
     2140             :  * There is also a final call of this function, which checks that there are no
     2141             :  * missing downlinks for children to the right of the child referenced by the
     2142             :  * rightmost downlink in the target level.
    2143             :  */
    2144             : static void
    2145        3768 : bt_child_highkey_check(BtreeCheckState *state,
    2146             :                        OffsetNumber target_downlinkoffnum,
    2147             :                        Page loaded_child,
    2148             :                        uint32 target_level)
    2149             : {
    2150        3768 :     BlockNumber blkno = state->prevrightlink;
    2151             :     Page        page;
    2152             :     BTPageOpaque opaque;
    2153        3768 :     bool        rightsplit = state->previncompletesplit;
    2154        3768 :     bool        first = true;
    2155             :     ItemId      itemid;
    2156             :     IndexTuple  itup;
    2157             :     BlockNumber downlink;
    2158             : 
    2159        3768 :     if (OffsetNumberIsValid(target_downlinkoffnum))
    2160             :     {
    2161        3746 :         itemid = PageGetItemIdCareful(state, state->targetblock,
    2162             :                                       state->target, target_downlinkoffnum);
    2163        3746 :         itup = (IndexTuple) PageGetItem(state->target, itemid);
    2164        3746 :         downlink = BTreeTupleGetDownLink(itup);
    2165             :     }
    2166             :     else
    2167             :     {
    2168          22 :         downlink = P_NONE;
    2169             :     }
    2170             : 
    2171             :     /*
     2172             :      * If no previous rightlink is memorized for the current level just below
     2173             :      * the target page's level, we are about to start from the leftmost page.
     2174             :      * We can't follow rightlinks from the previous page, because there is no
     2175             :      * previous page.  But we can still match the high key.
     2176             :      *
     2177             :      * So we initialize variables for the loop below as if there were a
     2178             :      * previous page referencing the current child.  We also assume the
     2179             :      * previous page had no incomplete-split flag, which would otherwise make
     2180             :      * us require a downlink for the current child.  That's correct, because
     2181             :      * the leftmost page on a level should always have a parent downlink.
    2182             :      */
    2183        3768 :     if (!BlockNumberIsValid(blkno))
    2184             :     {
    2185          22 :         blkno = downlink;
    2186          22 :         rightsplit = false;
    2187             :     }
    2188             : 
    2189             :     /* Move to the right on the child level */
    2190             :     while (true)
    2191             :     {
    2192             :         /*
     2193             :          * Did we traverse the whole tree level, and is this the check for
     2194             :          * pages to the right of the rightmost downlink?
    2195             :          */
    2196        3768 :         if (blkno == P_NONE && downlink == P_NONE)
    2197             :         {
    2198          22 :             state->prevrightlink = InvalidBlockNumber;
    2199          22 :             state->previncompletesplit = false;
    2200          22 :             return;
    2201             :         }
    2202             : 
     2203             :         /* Did we traverse the whole tree level without finding the next downlink? */
    2204        3746 :         if (blkno == P_NONE)
    2205           0 :             ereport(ERROR,
    2206             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    2207             :                      errmsg("can't traverse from downlink %u to downlink %u of index \"%s\"",
    2208             :                             state->prevrightlink, downlink,
    2209             :                             RelationGetRelationName(state->rel))));
    2210             : 
    2211             :         /* Load page contents */
    2212        3746 :         if (blkno == downlink && loaded_child)
    2213        3722 :             page = loaded_child;
    2214             :         else
    2215          24 :             page = palloc_btree_page(state, blkno);
    2216             : 
    2217        3746 :         opaque = BTPageGetOpaque(page);
    2218             : 
    2219             :         /* The first page we visit at the level should be leftmost */
    2220        3746 :         if (first && !BlockNumberIsValid(state->prevrightlink) &&
    2221          22 :             !bt_leftmost_ignoring_half_dead(state, blkno, opaque))
    2222           0 :             ereport(ERROR,
    2223             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    2224             :                      errmsg("the first child of leftmost target page is not leftmost of its level in index \"%s\"",
    2225             :                             RelationGetRelationName(state->rel)),
    2226             :                      errdetail_internal("Target block=%u child block=%u target page lsn=%X/%08X.",
    2227             :                                         state->targetblock, blkno,
    2228             :                                         LSN_FORMAT_ARGS(state->targetlsn))));
    2229             : 
    2230             :         /* Do level sanity check */
    2231        3746 :         if ((!P_ISDELETED(opaque) || P_HAS_FULLXID(opaque)) &&
    2232        3746 :             opaque->btpo_level != target_level - 1)
    2233           0 :             ereport(ERROR,
    2234             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    2235             :                      errmsg("block found while following rightlinks from child of index \"%s\" has invalid level",
    2236             :                             RelationGetRelationName(state->rel)),
    2237             :                      errdetail_internal("Block pointed to=%u expected level=%u level in pointed to block=%u.",
    2238             :                                         blkno, target_level - 1, opaque->btpo_level)));
    2239             : 
    2240             :         /* Try to detect circular links */
    2241        3746 :         if ((!first && blkno == state->prevrightlink) || blkno == opaque->btpo_prev)
    2242           0 :             ereport(ERROR,
    2243             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    2244             :                      errmsg("circular link chain found in block %u of index \"%s\"",
    2245             :                             blkno, RelationGetRelationName(state->rel))));
    2246             : 
    2247        3746 :         if (blkno != downlink && !P_IGNORE(opaque))
    2248             :         {
    2249             :             /* blkno probably has missing parent downlink */
    2250           0 :             bt_downlink_missing_check(state, rightsplit, blkno, page);
    2251             :         }
    2252             : 
    2253        3746 :         rightsplit = P_INCOMPLETE_SPLIT(opaque);
    2254             : 
    2255             :         /*
     2256             :          * If we visit a page with a high key, check that it is equal to the
     2257             :          * target key next to the corresponding downlink.
    2258             :          */
    2259        3746 :         if (!rightsplit && !P_RIGHTMOST(opaque) && !P_ISHALFDEAD(opaque))
    2260             :         {
    2261             :             BTPageOpaque topaque;
    2262             :             IndexTuple  highkey;
    2263             :             OffsetNumber pivotkey_offset;
    2264             : 
    2265             :             /* Get high key */
    2266        3724 :             itemid = PageGetItemIdCareful(state, blkno, page, P_HIKEY);
    2267        3724 :             highkey = (IndexTuple) PageGetItem(page, itemid);
    2268             : 
    2269             :             /*
     2270             :              * There are two situations in which we examine the high key.  If
     2271             :              * the current child page is referenced by the given target
     2272             :              * downlink, we should look at the next offset number for the
     2273             :              * matching key from the target page.
     2274             :              *
     2275             :              * Alternatively, we're following rightlinks somewhere between the
     2276             :              * page referenced by the previous target downlink and the page
     2277             :              * referenced by the current target downlink.  If the current
     2278             :              * child page doesn't have the incomplete-split flag set, then its
     2279             :              * high key should match the target's key at the current offset
     2280             :              * number.  This happens when a previous call here (to
    2281             :              * bt_child_highkey_check()) found an incomplete split, and we
    2282             :              * reach a right sibling page without a downlink -- the right
    2283             :              * sibling page's high key still needs to be matched to a
    2284             :              * separator key on the parent/target level.
    2285             :              *
    2286             :              * Don't apply OffsetNumberNext() to target_downlinkoffnum when we
    2287             :              * already had to step right on the child level. Our traversal of
    2288             :              * the child level must try to move in perfect lockstep behind (to
    2289             :              * the left of) the target/parent level traversal.
    2290             :              */
    2291        3724 :             if (blkno == downlink)
    2292        3724 :                 pivotkey_offset = OffsetNumberNext(target_downlinkoffnum);
    2293             :             else
    2294           0 :                 pivotkey_offset = target_downlinkoffnum;
    2295             : 
    2296        3724 :             topaque = BTPageGetOpaque(state->target);
    2297             : 
    2298        3724 :             if (!offset_is_negative_infinity(topaque, pivotkey_offset))
    2299             :             {
    2300             :                 /*
     2301             :                  * If we're looking for the next pivot tuple in the target
     2302             :                  * page, but there are no more pivot tuples, then we should
     2303             :                  * match against the high key instead.
    2304             :                  */
    2305        3724 :                 if (pivotkey_offset > PageGetMaxOffsetNumber(state->target))
    2306             :                 {
    2307           2 :                     if (P_RIGHTMOST(topaque))
    2308           0 :                         ereport(ERROR,
    2309             :                                 (errcode(ERRCODE_INDEX_CORRUPTED),
    2310             :                                  errmsg("child high key is greater than rightmost pivot key on target level in index \"%s\"",
    2311             :                                         RelationGetRelationName(state->rel)),
    2312             :                                  errdetail_internal("Target block=%u child block=%u target page lsn=%X/%08X.",
    2313             :                                                     state->targetblock, blkno,
    2314             :                                                     LSN_FORMAT_ARGS(state->targetlsn))));
    2315           2 :                     pivotkey_offset = P_HIKEY;
    2316             :                 }
    2317        3724 :                 itemid = PageGetItemIdCareful(state, state->targetblock,
    2318             :                                               state->target, pivotkey_offset);
    2319        3724 :                 itup = (IndexTuple) PageGetItem(state->target, itemid);
    2320             :             }
    2321             :             else
    2322             :             {
    2323             :                 /*
     2324             :                  * We cannot try to match the child's high key to a negative
     2325             :                  * infinity key in the target, since there is nothing to
     2326             :                  * compare.  However, it's still possible to match the child's
     2327             :                  * high key outside of the target page.  The reason we are
     2328             :                  * here is that bt_child_highkey_check() was previously called
     2329             :                  * for the cousin page of 'loaded_child', which had an
     2330             :                  * incomplete split.  So, we are now traversing to the right
     2331             :                  * of that cousin page, and the child-level page under
     2332             :                  * consideration still belongs to the subtree of the target's
     2333             :                  * left sibling.  Thus, we need to match the child's high key
     2334             :                  * to its left uncle page's high key.  Thankfully we saved it;
     2335             :                  * it's called the "low key" of the target page.
    2336             :                  */
    2337           0 :                 if (!state->lowkey)
    2338           0 :                     ereport(ERROR,
    2339             :                             (errcode(ERRCODE_INDEX_CORRUPTED),
    2340             :                              errmsg("can't find left sibling high key in index \"%s\"",
    2341             :                                     RelationGetRelationName(state->rel)),
    2342             :                              errdetail_internal("Target block=%u child block=%u target page lsn=%X/%08X.",
    2343             :                                                 state->targetblock, blkno,
    2344             :                                                 LSN_FORMAT_ARGS(state->targetlsn))));
    2345           0 :                 itup = state->lowkey;
    2346             :             }
    2347             : 
    2348        3724 :             if (!bt_pivot_tuple_identical(state->heapkeyspace, highkey, itup))
    2349             :             {
    2350           0 :                 ereport(ERROR,
    2351             :                         (errcode(ERRCODE_INDEX_CORRUPTED),
    2352             :                          errmsg("mismatch between parent key and child high key in index \"%s\"",
    2353             :                                 RelationGetRelationName(state->rel)),
    2354             :                          errdetail_internal("Target block=%u child block=%u target page lsn=%X/%08X.",
    2355             :                                             state->targetblock, blkno,
    2356             :                                             LSN_FORMAT_ARGS(state->targetlsn))));
    2357             :             }
    2358             :         }
    2359             : 
    2360             :         /* Exit if we already found next downlink */
    2361        3746 :         if (blkno == downlink)
    2362             :         {
    2363        3746 :             state->prevrightlink = opaque->btpo_next;
    2364        3746 :             state->previncompletesplit = rightsplit;
    2365        3746 :             return;
    2366             :         }
    2367             : 
    2368             :         /* Traverse to the next page using rightlink */
    2369           0 :         blkno = opaque->btpo_next;
    2370             : 
    2371             :         /* Free page contents if it's allocated by us */
    2372           0 :         if (page != loaded_child)
    2373           0 :             pfree(page);
    2374           0 :         first = false;
    2375             :     }
    2376             : }
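                      : 
                      : /*
                      :  * Editor's walk-through (added; restates the sample tree from the header
                      :  * comment above): when called for block 5, whose downlink is the last one in
                      :  * block 2, the traversal starts from the rightlink memorized after the block
                      :  * 4 call (which is block 5 itself), so the loop lands on block 5 immediately.
                      :  * pivotkey_offset then runs past block 2's last item, falls back to P_HIKEY,
                      :  * and block 5's high key is compared against block 2's high key -- matching
                      :  * the header's description of that call.
                      :  */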
    2377             : 
    2378             : /*
     2379             :  * Checks one of the target's downlinks against its child page.
    2380             :  *
    2381             :  * Conceptually, the target page continues to be what is checked here.  The
    2382             :  * target block is still blamed in the event of finding an invariant violation.
    2383             :  * The downlink insertion into the target is probably where any problem raised
    2384             :  * here arises, and there is no such thing as a parent link, so doing the
    2385             :  * verification this way around is much more practical.
    2386             :  *
     2387             :  * This function visits the child page and is called sequentially for each
     2388             :  * downlink of the target page.  Given that, we also check downlink
     2389             :  * connectivity here in order to save child page visits.
    2390             :  */
    2391             : static void
    2392        3722 : bt_child_check(BtreeCheckState *state, BTScanInsert targetkey,
    2393             :                OffsetNumber downlinkoffnum)
    2394             : {
    2395             :     ItemId      itemid;
    2396             :     IndexTuple  itup;
    2397             :     BlockNumber childblock;
    2398             :     OffsetNumber offset;
    2399             :     OffsetNumber maxoffset;
    2400             :     Page        child;
    2401             :     BTPageOpaque copaque;
    2402             :     BTPageOpaque topaque;
    2403             : 
    2404        3722 :     itemid = PageGetItemIdCareful(state, state->targetblock,
    2405             :                                   state->target, downlinkoffnum);
    2406        3722 :     itup = (IndexTuple) PageGetItem(state->target, itemid);
    2407        3722 :     childblock = BTreeTupleGetDownLink(itup);
    2408             : 
    2409             :     /*
    2410             :      * Caller must have ShareLock on target relation, because of
    2411             :      * considerations around page deletion by VACUUM.
    2412             :      *
    2413             :      * NB: In general, page deletion deletes the right sibling's downlink, not
    2414             :      * the downlink of the page being deleted; the deleted page's downlink is
    2415             :      * reused for its sibling.  The key space is thereby consolidated between
    2416             :      * the deleted page and its right sibling.  (We cannot delete a parent
    2417             :      * page's rightmost child unless it is the last child page, and we intend
    2418             :      * to also delete the parent itself.)
    2419             :      *
    2420             :      * If this verification happened without a ShareLock, the following race
    2421             :      * condition could cause false positives:
    2422             :      *
    2423             :      * In general, concurrent page deletion might occur, including deletion of
    2424             :      * the left sibling of the child page that is examined here.  If such a
    2425             :      * page deletion were to occur, closely followed by an insertion into the
    2426             :      * newly expanded key space of the child, a window for the false positive
    2427             :      * opens up: the stale parent/target downlink originally followed to get
    2428             :      * to the child legitimately ceases to be a lower bound on all items in
    2429             :      * the page, since the key space was concurrently expanded "left".
    2430             :      * (Insertion followed the "new" downlink for the child, not our now-stale
    2431             :      * downlink, which was concurrently physically removed in target/parent as
    2432             :      * part of deletion's first phase.)
    2433             :      *
    2434             :      * While we use various techniques elsewhere to perform cross-page
    2435             :      * verification for !readonly callers, a similar trick seems difficult
    2436             :      * here.  The tricks used by bt_recheck_sibling_links and by
    2437             :      * bt_right_page_check_scankey both involve verification of a same-level,
    2438             :      * cross-sibling invariant.  Cross-level invariants are far more squishy,
    2439             :      * though.  The nbtree REDO routines do not actually couple buffer locks
    2440             :      * across levels during page splits, so making any cross-level check work
    2441             :      * reliably in !readonly mode may be impossible.
    2442             :      */
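                      : 
                      :     /*
                      :      * Editor's illustration of the race above (added; hypothetical timeline):
                      :      *
                      :      * 1. We read the target/parent and remember its downlink D to child C.
                      :      * 2. VACUUM deletes C's left sibling; C's key space expands to the left,
                      :      *    and D is physically removed from the parent, with the deleted
                      :      *    sibling's old downlink now pointing to C instead.
                      :      * 3. A new tuple with a key smaller than D's separator key is inserted
                      :      *    into C via the "new" downlink.
                      :      * 4. Without a ShareLock, a check against D would then see a child item
                      :      *    below the remembered downlink key and wrongly report corruption.
                      :      */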
    2443             :     Assert(state->readonly);
    2444             : 
    2445             :     /*
    2446             :      * Verify child page has the downlink key from target page (its parent) as
    2447             :      * a lower bound; downlink must be strictly less than all keys on the
    2448             :      * page.
    2449             :      *
    2450             :      * Check all items, rather than checking just the first and trusting that
    2451             :      * the operator class obeys the transitive law.
    2452             :      */
    2453        3722 :     topaque = BTPageGetOpaque(state->target);
    2454        3722 :     child = palloc_btree_page(state, childblock);
    2455        3722 :     copaque = BTPageGetOpaque(child);
    2456        3722 :     maxoffset = PageGetMaxOffsetNumber(child);
    2457             : 
    2458             :     /*
    2459             :      * Since we've already loaded the child block, combine this check with
    2460             :      * check for downlink connectivity.
    2461             :      */
    2462        3722 :     bt_child_highkey_check(state, downlinkoffnum,
    2463             :                            child, topaque->btpo_level);
    2464             : 
    2465             :     /*
    2466             :      * Since there cannot be a concurrent VACUUM operation in readonly mode,
    2467             :      * and since a page has no links within other pages (siblings and parent)
    2468             :      * once it is marked fully deleted, it should be impossible to land on a
    2469             :      * fully deleted page.
    2470             :      *
    2471             :      * It does not quite make sense to enforce that the page cannot even be
     2472             :      * half-dead, despite the fact that the downlink is modified at the same stage
    2473             :      * that the child leaf page is marked half-dead.  That's incorrect because
    2474             :      * there may occasionally be multiple downlinks from a chain of pages
    2475             :      * undergoing deletion, where multiple successive calls are made to
    2476             :      * _bt_unlink_halfdead_page() by VACUUM before it can finally safely mark
    2477             :      * the leaf page as fully dead.  While _bt_mark_page_halfdead() usually
    2478             :      * removes the downlink to the leaf page that is marked half-dead, that's
    2479             :      * not guaranteed, so it's possible we'll land on a half-dead page with a
    2480             :      * downlink due to an interrupted multi-level page deletion.
    2481             :      *
    2482             :      * We go ahead with our checks if the child page is half-dead.  It's safe
    2483             :      * to do so because we do not test the child's high key, so it does not
    2484             :      * matter that the original high key will have been replaced by a dummy
    2485             :      * truncated high key within _bt_mark_page_halfdead().  All other page
    2486             :      * items are left intact on a half-dead page, so there is still something
    2487             :      * to test.
    2488             :      */
    2489        3722 :     if (P_ISDELETED(copaque))
    2490           0 :         ereport(ERROR,
    2491             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    2492             :                  errmsg("downlink to deleted page found in index \"%s\"",
    2493             :                         RelationGetRelationName(state->rel)),
    2494             :                  errdetail_internal("Parent block=%u child block=%u parent page lsn=%X/%08X.",
    2495             :                                     state->targetblock, childblock,
    2496             :                                     LSN_FORMAT_ARGS(state->targetlsn))));
    2497             : 
    2498        3722 :     for (offset = P_FIRSTDATAKEY(copaque);
    2499     1200700 :          offset <= maxoffset;
    2500     1196978 :          offset = OffsetNumberNext(offset))
    2501             :     {
    2502             :         /*
    2503             :          * Skip comparison of target page key against "negative infinity"
    2504             :          * item, if any.  Checking it would indicate that it's not a strict
    2505             :          * lower bound, but that's only because of the hard-coding for
    2506             :          * negative infinity items within _bt_compare().
    2507             :          *
    2508             :          * If nbtree didn't truncate negative infinity tuples during internal
    2509             :          * page splits then we'd expect child's negative infinity key to be
    2510             :          * equal to the scankey/downlink from target/parent (it would be a
    2511             :          * "low key" in this hypothetical scenario, and so it would still need
    2512             :          * to be treated as a special case here).
    2513             :          *
    2514             :          * Negative infinity items can be thought of as a strict lower bound
    2515             :          * that works transitively, with the last non-negative-infinity pivot
    2516             :          * followed during a descent from the root as its "true" strict lower
    2517             :          * bound.  Only a small number of negative infinity items are truly
    2518             :          * negative infinity; those that are the first items of leftmost
    2519             :          * internal pages.  In more general terms, a negative infinity item is
    2520             :          * only negative infinity with respect to the subtree that the page is
    2521             :          * at the root of.
    2522             :          *
    2523             :          * See also: bt_rootdescend(), which can even detect transitive
    2524             :          * inconsistencies on cousin leaf pages.
    2525             :          */
    2526     1196978 :         if (offset_is_negative_infinity(copaque, offset))
    2527           2 :             continue;
    2528             : 
    2529     1196976 :         if (!invariant_l_nontarget_offset(state, targetkey, childblock, child,
    2530             :                                           offset))
    2531           0 :             ereport(ERROR,
    2532             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    2533             :                      errmsg("down-link lower bound invariant violated for index \"%s\"",
    2534             :                             RelationGetRelationName(state->rel)),
    2535             :                      errdetail_internal("Parent block=%u child index tid=(%u,%u) parent page lsn=%X/%08X.",
    2536             :                                         state->targetblock, childblock, offset,
    2537             :                                         LSN_FORMAT_ARGS(state->targetlsn))));
    2538             :     }
    2539             : 
    2540        3722 :     pfree(child);
    2541        3722 : }
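                      : 
                      : /*
                      :  * Editor's illustration (added commentary): a concrete instance of the
                      :  * lower-bound invariant enforced in the loop above.  If the target's downlink
                      :  * at downlinkoffnum carries separator key 100, then every
                      :  * non-negative-infinity item on the child page must compare strictly greater
                      :  * than that key under invariant_l_nontarget_offset(); finding a child item
                      :  * with key 95 would raise the "down-link lower bound invariant violated"
                      :  * error.
                      :  */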
    2542             : 
    2543             : /*
    2544             :  * Checks if page is missing a downlink that it should have.
    2545             :  *
    2546             :  * A page that lacks a downlink/parent may indicate corruption.  However, we
    2547             :  * must account for the fact that a missing downlink can occasionally be
    2548             :  * encountered in a non-corrupt index.  This can be due to an interrupted page
    2549             :  * split, or an interrupted multi-level page deletion (i.e. there was a hard
    2550             :  * crash or an error during a page split, or while VACUUM was deleting a
    2551             :  * multi-level chain of pages).
    2552             :  *
    2553             :  * Note that this can only be called in readonly mode, so there is no need to
    2554             :  * be concerned about concurrent page splits or page deletions.
    2555             :  */
    2556             : static void
    2557           0 : bt_downlink_missing_check(BtreeCheckState *state, bool rightsplit,
    2558             :                           BlockNumber blkno, Page page)
    2559             : {
    2560           0 :     BTPageOpaque opaque = BTPageGetOpaque(page);
    2561             :     ItemId      itemid;
    2562             :     IndexTuple  itup;
    2563             :     Page        child;
    2564             :     BTPageOpaque copaque;
    2565             :     uint32      level;
    2566             :     BlockNumber childblk;
    2567             :     XLogRecPtr  pagelsn;
    2568             : 
    2569             :     Assert(state->readonly);
    2570             :     Assert(!P_IGNORE(opaque));
    2571             : 
    2572             :     /* No next level up with downlinks to fingerprint from the true root */
    2573           0 :     if (P_ISROOT(opaque))
    2574           0 :         return;
    2575             : 
    2576           0 :     pagelsn = PageGetLSN(page);
    2577             : 
    2578             :     /*
    2579             :      * Incomplete (interrupted) page splits can account for the lack of a
    2580             :      * downlink.  Some inserting transaction should eventually complete the
    2581             :      * page split in passing, when it notices that the left sibling page is
    2582             :      * P_INCOMPLETE_SPLIT().
    2583             :      *
    2584             :      * In general, VACUUM is not prepared for there to be no downlink to a
    2585             :      * page that it deletes.  This is the main reason why the lack of a
    2586             :      * downlink can be reported as corruption here.  It's not obvious that an
    2587             :      * invalid missing downlink can result in wrong answers to queries,
    2588             :      * though, since index scans that land on the child may end up
    2589             :      * consistently moving right. The handling of concurrent page splits (and
    2590             :      * page deletions) within _bt_moveright() cannot distinguish
    2591             :      * inconsistencies that last for a moment from inconsistencies that are
    2592             :      * permanent and irrecoverable.
    2593             :      *
    2594             :      * VACUUM isn't even prepared to delete pages that have no downlink due to
    2595             :      * an incomplete page split, but it can detect and reason about that case
    2596             :      * by design, so it shouldn't be taken to indicate corruption.  See
    2597             :      * _bt_pagedel() for full details.
    2598             :      */
    2599           0 :     if (rightsplit)
    2600             :     {
    2601           0 :         ereport(DEBUG1,
    2602             :                 (errcode(ERRCODE_NO_DATA),
    2603             :                  errmsg_internal("harmless interrupted page split detected in index \"%s\"",
    2604             :                                  RelationGetRelationName(state->rel)),
    2605             :                  errdetail_internal("Block=%u level=%u left sibling=%u page lsn=%X/%08X.",
    2606             :                                     blkno, opaque->btpo_level,
    2607             :                                     opaque->btpo_prev,
    2608             :                                     LSN_FORMAT_ARGS(pagelsn))));
    2609           0 :         return;
    2610             :     }
    2611             : 
    2612             :     /*
    2613             :      * Page under check is probably the "top parent" of a multi-level page
    2614             :      * deletion.  We'll need to descend the subtree to make sure that
    2615             :      * descendant pages are consistent with that, though.
    2616             :      *
    2617             :      * If the page (which must be non-ignorable) is a leaf page, then clearly
    2618             :      * it can't be the top parent.  The lack of a downlink is probably a
    2619             :      * symptom of a broad problem that could just as easily cause
    2620             :      * inconsistencies anywhere else.
    2621             :      */
    2622           0 :     if (P_ISLEAF(opaque))
    2623           0 :         ereport(ERROR,
    2624             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    2625             :                  errmsg("leaf index block lacks downlink in index \"%s\"",
    2626             :                         RelationGetRelationName(state->rel)),
    2627             :                  errdetail_internal("Block=%u page lsn=%X/%08X.",
    2628             :                                     blkno,
    2629             :                                     LSN_FORMAT_ARGS(pagelsn))));
    2630             : 
    2631             :     /* Descend from the given page, which is an internal page */
    2632           0 :     elog(DEBUG1, "checking for interrupted multi-level deletion due to missing downlink in index \"%s\"",
    2633             :          RelationGetRelationName(state->rel));
    2634             : 
    2635           0 :     level = opaque->btpo_level;
    2636           0 :     itemid = PageGetItemIdCareful(state, blkno, page, P_FIRSTDATAKEY(opaque));
    2637           0 :     itup = (IndexTuple) PageGetItem(page, itemid);
    2638           0 :     childblk = BTreeTupleGetDownLink(itup);
    2639             :     for (;;)
    2640             :     {
    2641           0 :         CHECK_FOR_INTERRUPTS();
    2642             : 
    2643           0 :         child = palloc_btree_page(state, childblk);
    2644           0 :         copaque = BTPageGetOpaque(child);
    2645             : 
    2646           0 :         if (P_ISLEAF(copaque))
    2647           0 :             break;
    2648             : 
    2649             :         /* Do an extra sanity check in passing on internal pages */
    2650           0 :         if (copaque->btpo_level != level - 1)
    2651           0 :             ereport(ERROR,
    2652             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    2653             :                      errmsg_internal("downlink points to block in index \"%s\" whose level is not one level down",
    2654             :                                      RelationGetRelationName(state->rel)),
    2655             :                      errdetail_internal("Top parent/under check block=%u block pointed to=%u expected level=%u level in pointed to block=%u.",
    2656             :                                         blkno, childblk,
    2657             :                                         level - 1, copaque->btpo_level)));
    2658             : 
    2659           0 :         level = copaque->btpo_level;
    2660           0 :         itemid = PageGetItemIdCareful(state, childblk, child,
    2661           0 :                                       P_FIRSTDATAKEY(copaque));
    2662           0 :         itup = (IndexTuple) PageGetItem(child, itemid);
    2663           0 :         childblk = BTreeTupleGetDownLink(itup);
    2664             :         /* Be slightly more pro-active in freeing this memory, just in case */
    2665           0 :         pfree(child);
    2666             :     }
    2667             : 
    2668             :     /*
    2669             :      * Since there cannot be a concurrent VACUUM operation in readonly mode,
    2670             :      * and since a page has no links within other pages (siblings and parent)
    2671             :      * once it is marked fully deleted, it should be impossible to land on a
    2672             :      * fully deleted page.  See bt_child_check() for further details.
    2673             :      *
    2674             :      * The bt_child_check() P_ISDELETED() check is repeated here because
    2675             :      * bt_child_check() does not visit pages reachable through negative
    2676             :      * infinity items.  Besides, bt_child_check() is unwilling to descend
    2677             :      * multiple levels.  (The similar bt_child_check() P_ISDELETED() check
    2678             :      * within bt_check_level_from_leftmost() won't reach the page either,
    2679             :      * since the leaf's live siblings should have their sibling links updated
    2680             :      * to bypass the deletion target page when it is marked fully dead.)
    2681             :      *
    2682             :      * If this error is raised, it might be due to a previous multi-level page
    2683             :      * deletion that failed to realize that it wasn't yet safe to mark the
    2684             :      * leaf page as fully dead.  A "dangling downlink" will still remain when
    2685             :      * this happens.  The fact that the dangling downlink's page (the leaf's
    2686             :      * parent/ancestor page) lacked a downlink is incidental.
    2687             :      */
    2688           0 :     if (P_ISDELETED(copaque))
    2689           0 :         ereport(ERROR,
    2690             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    2691             :                  errmsg_internal("downlink to deleted leaf page found in index \"%s\"",
    2692             :                                  RelationGetRelationName(state->rel)),
    2693             :                  errdetail_internal("Top parent/target block=%u leaf block=%u top parent/under check lsn=%X/%08X.",
    2694             :                                     blkno, childblk,
    2695             :                                     LSN_FORMAT_ARGS(pagelsn))));
    2696             : 
    2697             :     /*
    2698             :      * Iff leaf page is half-dead, its high key top parent link should point
    2699             :      * to what VACUUM considered to be the top parent page at the instant it
    2700             :      * was interrupted.  Provided the high key link actually points to the
    2701             :      * page under check, the missing downlink we detected is consistent with
    2702             :      * there having been an interrupted multi-level page deletion.  This means
    2703             :      * that the subtree with the page under check at its root (a page deletion
    2704             :      * chain) is in a consistent state, enabling VACUUM to resume deleting the
    2705             :      * entire chain the next time it encounters the half-dead leaf page.
    2706             :      */
    2707           0 :     if (P_ISHALFDEAD(copaque) && !P_RIGHTMOST(copaque))
    2708             :     {
    2709           0 :         itemid = PageGetItemIdCareful(state, childblk, child, P_HIKEY);
    2710           0 :         itup = (IndexTuple) PageGetItem(child, itemid);
    2711           0 :         if (BTreeTupleGetTopParent(itup) == blkno)
    2712           0 :             return;
    2713             :     }
    2714             : 
    2715           0 :     ereport(ERROR,
    2716             :             (errcode(ERRCODE_INDEX_CORRUPTED),
    2717             :              errmsg("internal index block lacks downlink in index \"%s\"",
    2718             :                     RelationGetRelationName(state->rel)),
    2719             :              errdetail_internal("Block=%u level=%u page lsn=%X/%08X.",
    2720             :                                 blkno, opaque->btpo_level,
    2721             :                                 LSN_FORMAT_ARGS(pagelsn))));
    2722             : }
    2723             : 
    2724             : /*
    2725             :  * Per-tuple callback from table_index_build_scan, used to determine if index has
    2726             :  * all the entries that definitely should have been observed in leaf pages of
    2727             :  * the target index (that is, all IndexTuples that were fingerprinted by our
    2728             :  * Bloom filter).  All heapallindexed checks occur here.
    2729             :  *
    2730             :  * The redundancy between an index and the table it indexes provides a good
    2731             :  * opportunity to detect corruption, especially corruption within the table.
    2732             :  * The high level principle behind the verification performed here is that any
    2733             :  * IndexTuple that should be in an index following a fresh CREATE INDEX (based
    2734             :  * on the same index definition) should also have been in the original,
    2735             :  * existing index, which should have used exactly the same representation.
    2736             :  *
    2737             :  * Since the overall structure of the index has already been verified, the most
    2738             :  * likely explanation for error here is a corrupt heap page (could be logical
    2739             :  * or physical corruption).  Index corruption may still be detected here,
    2740             :  * though.  Only readonly callers will have verified that left links and right
    2741             :  * links are in agreement, and so it's possible that a leaf page transposition
    2742             :  * within index is actually the source of corruption detected here (for
    2743             :  * !readonly callers).  The checks performed only for readonly callers might
    2744             :  * more accurately frame the problem as a cross-page invariant issue (this
    2745             :  * could even be due to recovery not replaying all WAL records).  The !readonly
    2746             :  * ERROR message raised here includes a HINT about retrying with readonly
    2747             :  * verification, just in case it's a cross-page invariant issue, though that
    2748             :  * isn't particularly likely.
    2749             :  *
    2750             :  * table_index_build_scan() expects to be able to find the root tuple when a
    2751             :  * heap-only tuple (the live tuple at the end of some HOT chain) needs to be
    2752             :  * indexed, in order to replace the actual tuple's TID with the root tuple's
    2753             :  * TID (which is what we're actually passed back here).  The index build heap
    2754             :  * scan code will raise an error when a tuple that claims to be the root of the
    2755             :  * heap-only tuple's HOT chain cannot be located.  This catches cases where the
    2756             :  * original root item offset/root tuple for a HOT chain indicates (for whatever
    2757             :  * reason) that the entire HOT chain is dead, despite the fact that the latest
    2758             :  * heap-only tuple should be indexed.  When this happens, sequential scans may
    2759             :  * always give correct answers, and all indexes may be considered structurally
    2760             :  * consistent (i.e. the nbtree structural checks would not detect corruption).
    2761             :  * It may be the case that only index scans give wrong answers, and yet heap or
    2762             :  * SLRU corruption is the real culprit.  (While it's true that LP_DEAD bit
    2763             :  * setting will probably also leave the index in a corrupt state before too
    2764             :  * long, the problem is nonetheless that there is heap corruption.)
    2765             :  *
    2766             :  * Heap-only tuple handling within table_index_build_scan() works in a way that
    2767             :  * helps us to detect index tuples that contain the wrong values (values that
    2768             :  * don't match the latest tuple in the HOT chain).  This can happen when there
    2769             :  * is no superseding index tuple due to a faulty assessment of HOT safety,
    2770             :  * perhaps during the original CREATE INDEX.  Because the latest tuple's
    2771             :  * contents are used with the root TID, an error will be raised when a tuple
    2772             :  * with the same TID but non-matching attribute values is passed back to us.
    2773             :  * Faulty assessment of HOT-safety was behind at least two distinct CREATE
    2774             :  * INDEX CONCURRENTLY bugs that made it into stable releases, one of which was
    2775             :  * undetected for many years.  In short, the same principle that allows a
    2776             :  * REINDEX to repair corruption when there was an (undetected) broken HOT chain
    2777             :  * also allows us to detect the corruption in many cases.
    2778             :  */
    2779             : static void
    2780     1063694 : bt_tuple_present_callback(Relation index, ItemPointer tid, Datum *values,
    2781             :                           bool *isnull, bool tupleIsAlive, void *checkstate)
    2782             : {
    2783     1063694 :     BtreeCheckState *state = (BtreeCheckState *) checkstate;
    2784             :     IndexTuple  itup,
    2785             :                 norm;
    2786             : 
    2787             :     Assert(state->heapallindexed);
    2788             : 
    2789             :     /* Generate a normalized index tuple for fingerprinting */
    2790     1063694 :     itup = index_form_tuple(RelationGetDescr(index), values, isnull);
    2791     1063694 :     itup->t_tid = *tid;
    2792     1063694 :     norm = bt_normalize_tuple(state, itup);
    2793             : 
    2794             :     /* Probe Bloom filter -- tuple should be present */
    2795     1063694 :     if (bloom_lacks_element(state->filter, (unsigned char *) norm,
    2796             :                             IndexTupleSize(norm)))
    2797           0 :         ereport(ERROR,
    2798             :                 (errcode(ERRCODE_DATA_CORRUPTED),
    2799             :                  errmsg("heap tuple (%u,%u) from table \"%s\" lacks matching index tuple within index \"%s\"",
    2800             :                         ItemPointerGetBlockNumber(&(itup->t_tid)),
    2801             :                         ItemPointerGetOffsetNumber(&(itup->t_tid)),
    2802             :                         RelationGetRelationName(state->heaprel),
    2803             :                         RelationGetRelationName(state->rel)),
    2804             :                  !state->readonly
    2805             :                  ? errhint("Retrying verification using the function bt_index_parent_check() might provide a more specific error.")
    2806             :                  : 0));
    2807             : 
    2808     1063694 :     state->heaptuplespresent++;
    2809     1063694 :     pfree(itup);
    2810             :     /* Cannot leak memory here */
    2811     1063694 :     if (norm != itup)
    2812          10 :         pfree(norm);
    2813     1063694 : }
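
The probe above is driven by PostgreSQL's CREATE INDEX heap-scan machinery. A minimal sketch of how a caller might wire this callback up through table_index_build_scan(), condensed from what the amcheck verification setup does (snapshot registration, scan construction, and progress handling omitted; "state" is assumed to be an already-initialized BtreeCheckState):

	/* Condensed, illustrative only -- not the complete amcheck setup */
	IndexInfo  *indexinfo = BuildIndexInfo(state->rel);

	/* Behave like the initial heap scan of a CREATE INDEX [CONCURRENTLY] */
	indexinfo->ii_Concurrent = !state->readonly;
	indexinfo->ii_Unique = false;	/* don't raise duplicate-key errors */
	indexinfo->ii_ExclusionOps = NULL;

	/* Invoke bt_tuple_present_callback() for every tuple that must be indexed */
	(void) table_index_build_scan(state->heaprel, state->rel, indexinfo,
								  true, false,
								  bt_tuple_present_callback, state, NULL);
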
    2814             : 
    2815             : /*
    2816             :  * Normalize an index tuple for fingerprinting.
    2817             :  *
    2818             :  * In general, index tuple formation is assumed to be deterministic by
    2819             :  * heapallindexed verification, and IndexTuples are assumed immutable.  While
    2820             :  * the LP_DEAD bit is mutable in leaf pages, that's ItemId metadata, which is
    2821             :  * not fingerprinted.  Normalization is required to compensate for corner
    2822             :  * cases where the determinism assumption doesn't quite work.
    2823             :  *
    2824             :  * There is currently one such case: index_form_tuple() does not try to hide
    2825             :  * the source TOAST state of input datums.  The executor applies TOAST
    2826             :  * compression for heap tuples based on different criteria to the compression
    2827             :  * applied within btinsert()'s call to index_form_tuple(): it sometimes
    2828             :  * compresses more aggressively, resulting in compressed heap tuple datums but
    2829             :  * uncompressed corresponding index tuple datums.  A subsequent heapallindexed
    2830             :  * verification will get a logically equivalent though bitwise unequal tuple
    2831             :  * from index_form_tuple().  False positive heapallindexed corruption reports
    2832             :  * could occur without normalizing away the inconsistency.
    2833             :  *
    2834             :  * Returned tuple is often caller's own original tuple.  Otherwise, it is a
    2835             :  * new representation of caller's original index tuple, palloc()'d in caller's
    2836             :  * memory context.
    2837             :  *
    2838             :  * Note: This routine is not concerned with distinctions about the
    2839             :  * representation of tuples beyond those that might break heapallindexed
    2840             :  * verification.  In particular, it won't try to normalize opclass-equal
    2841             :  * datums with potentially distinct representations (e.g., btree/numeric_ops
    2842             :  * index datums will not get their display scale normalized-away here).
    2843             :  * Caller does normalization for non-pivot tuples that have a posting list,
    2844             :  * since dummy CREATE INDEX callback code generates new tuples with the same
    2845             :  * normalized representation.
    2846             :  */
    2847             : static IndexTuple
    2848     2132178 : bt_normalize_tuple(BtreeCheckState *state, IndexTuple itup)
    2849             : {
    2850     2132178 :     TupleDesc   tupleDescriptor = RelationGetDescr(state->rel);
    2851             :     Datum       normalized[INDEX_MAX_KEYS];
    2852             :     bool        isnull[INDEX_MAX_KEYS];
    2853             :     bool        need_free[INDEX_MAX_KEYS];
    2854     2132178 :     bool        formnewtup = false;
    2855             :     IndexTuple  reformed;
    2856             :     int         i;
    2857             : 
    2858             :     /* Caller should only pass "logical" non-pivot tuples here */
    2859             :     Assert(!BTreeTupleIsPosting(itup) && !BTreeTupleIsPivot(itup));
    2860             : 
    2861             :     /* Easy case: It's immediately clear that tuple has no varlena datums */
    2862     2132178 :     if (!IndexTupleHasVarwidths(itup))
    2863     2132130 :         return itup;
    2864             : 
    2865          96 :     for (i = 0; i < tupleDescriptor->natts; i++)
    2866             :     {
    2867             :         Form_pg_attribute att;
    2868             : 
    2869          48 :         att = TupleDescAttr(tupleDescriptor, i);
    2870             : 
    2871             :         /* Assume untoasted/already normalized datum initially */
    2872          48 :         need_free[i] = false;
    2873          48 :         normalized[i] = index_getattr(itup, att->attnum,
    2874             :                                       tupleDescriptor,
    2875             :                                       &isnull[i]);
    2876          48 :         if (att->attbyval || att->attlen != -1 || isnull[i])
    2877           0 :             continue;
    2878             : 
    2879             :         /*
    2880             :          * Callers always pass a tuple that could safely be inserted into the
    2881             :          * index without further processing, so an external varlena header
    2882             :          * should never be encountered here
    2883             :          */
    2884          48 :         if (VARATT_IS_EXTERNAL(DatumGetPointer(normalized[i])))
    2885           0 :             ereport(ERROR,
    2886             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    2887             :                      errmsg("external varlena datum in tuple that references heap row (%u,%u) in index \"%s\"",
    2888             :                             ItemPointerGetBlockNumber(&(itup->t_tid)),
    2889             :                             ItemPointerGetOffsetNumber(&(itup->t_tid)),
    2890             :                             RelationGetRelationName(state->rel))));
    2891          92 :         else if (!VARATT_IS_COMPRESSED(DatumGetPointer(normalized[i])) &&
    2892          44 :                  VARSIZE(DatumGetPointer(normalized[i])) > TOAST_INDEX_TARGET &&
    2893          42 :                  (att->attstorage == TYPSTORAGE_EXTENDED ||
    2894          32 :                   att->attstorage == TYPSTORAGE_MAIN))
    2895             :         {
    2896             :             /*
    2897             :              * This value will be compressed by index_form_tuple() with the
    2898             :              * current storage settings.  We may be here because this tuple
    2899             :              * was formed with different storage settings.  So, force forming.
    2900             :              */
    2901          10 :             formnewtup = true;
    2902             :         }
    2903          38 :         else if (VARATT_IS_COMPRESSED(DatumGetPointer(normalized[i])))
    2904             :         {
    2905           4 :             formnewtup = true;
    2906           4 :             normalized[i] = PointerGetDatum(PG_DETOAST_DATUM(normalized[i]));
    2907           4 :             need_free[i] = true;
    2908             :         }
    2909             : 
    2910             :         /*
    2911             :          * Short varlena datums may have a 1B or 4B header.  Convert the 4B
    2912             :          * header of short varlenas to the 1B form for fingerprinting.
    2913             :          */
    2914          34 :         else if (VARATT_CAN_MAKE_SHORT(DatumGetPointer(normalized[i])))
    2915             :         {
    2916             :             /* convert to short varlena */
    2917           2 :             Size        len = VARATT_CONVERTED_SHORT_SIZE(DatumGetPointer(normalized[i]));
    2918           2 :             char       *data = palloc(len);
    2919             : 
    2920           2 :             SET_VARSIZE_SHORT(data, len);
    2921           2 :             memcpy(data + 1, VARDATA(DatumGetPointer(normalized[i])), len - 1);
    2922             : 
    2923           2 :             formnewtup = true;
    2924           2 :             normalized[i] = PointerGetDatum(data);
    2925           2 :             need_free[i] = true;
    2926             :         }
    2927             :     }
    2928             : 
    2929             :     /*
    2930             :      * Easier case: Tuple has varlena datums, none of which are compressed or
    2931             :      * short with a 4B header.
    2932             :      */
    2933          48 :     if (!formnewtup)
    2934          32 :         return itup;
    2935             : 
    2936             :     /*
    2937             :      * Hard case: Tuple had compressed varlena datums that necessitate
    2938             :      * creating a normalized version of the tuple from uncompressed input
    2939             :      * datums (normalized input datums).  This is rather naive, but shouldn't
    2940             :      * be necessary too often.
    2941             :      *
    2942             :      * In the heap, tuples may contain short varlena datums with either 1B or
    2943             :      * 4B headers.  But the corresponding index tuple should always have such
    2944             :      * varlenas with 1B headers.  So, if there is a short varlena with a 4B
    2945             :      * header, we need to convert it for fingerprinting.
    2946             :      *
    2947             :      * Note that we rely on deterministic index_form_tuple() TOAST compression
    2948             :      * of normalized input.
    2949             :      */
    2950          16 :     reformed = index_form_tuple(tupleDescriptor, normalized, isnull);
    2951          16 :     reformed->t_tid = itup->t_tid;
    2952             : 
    2953             :     /* Cannot leak memory here */
    2954          32 :     for (i = 0; i < tupleDescriptor->natts; i++)
    2955          16 :         if (need_free[i])
    2956           6 :             pfree(DatumGetPointer(normalized[i]));
    2957             : 
    2958          16 :     return reformed;
    2959             : }
    2960             : 
    2961             : /*
    2962             :  * Produce palloc()'d "plain" tuple for nth posting list entry/TID.
    2963             :  *
    2964             :  * In general, deduplication is not supposed to change the logical contents of
    2965             :  * an index.  Multiple index tuples are merged together into one equivalent
    2966             :  * posting list index tuple when convenient.
    2967             :  *
    2968             :  * heapallindexed verification must normalize-away this variation in
    2969             :  * representation by converting posting list tuples into two or more "plain"
    2970             :  * tuples.  Each tuple must be fingerprinted separately -- there must be one
    2971             :  * tuple for each corresponding Bloom filter probe during the heap scan.
    2972             :  *
    2973             :  * Note: Caller still needs to call bt_normalize_tuple() with returned tuple.
    2974             :  */
    2975             : static inline IndexTuple
    2976       54854 : bt_posting_plain_tuple(IndexTuple itup, int n)
    2977             : {
    2978             :     Assert(BTreeTupleIsPosting(itup));
    2979             : 
    2980             :     /* Returns non-posting-list tuple */
    2981       54854 :     return _bt_form_posting(itup, BTreeTupleGetPostingN(itup, n), 1);
    2982             : }
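
For context, here is a condensed sketch of the fingerprinting pass that this helper supports, simplified from the leaf-page handling elsewhere in this file ("itup" and "state" are assumed to come from that caller): each posting list entry is expanded into a plain tuple, normalized, and added to the Bloom filter, so that the later per-tuple heap-scan probes in bt_tuple_present_callback() can match one for one.

	if (BTreeTupleIsPosting(itup))
	{
		/* Fingerprint all posting list entries as separate "plain" tuples */
		for (int i = 0; i < BTreeTupleGetNPosting(itup); i++)
		{
			IndexTuple	logtuple = bt_posting_plain_tuple(itup, i);
			IndexTuple	norm = bt_normalize_tuple(state, logtuple);

			bloom_add_element(state->filter, (unsigned char *) norm,
							  IndexTupleSize(norm));
			if (norm != logtuple)
				pfree(norm);
			pfree(logtuple);
		}
	}
	else
	{
		/* Plain non-pivot tuple: fingerprint the normalized form directly */
		IndexTuple	norm = bt_normalize_tuple(state, itup);

		bloom_add_element(state->filter, (unsigned char *) norm,
						  IndexTupleSize(norm));
		if (norm != itup)
			pfree(norm);
	}
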
    2983             : 
    2984             : /*
    2985             :  * Search for itup in index, starting from fast root page.  itup must be a
    2986             :  * non-pivot tuple.  This is only supported with heapkeyspace indexes, since
    2987             :  * we rely on having fully unique keys to find a match with only a single
    2988             :  * visit to a leaf page, barring an interrupted page split, where we may have
    2989             :  * to move right.  (A concurrent page split is impossible because caller must
    2990             :  * be readonly caller.)
    2991             :  *
    2992             :  * This routine can detect very subtle transitive consistency issues across
    2993             :  * more than one level of the tree.  Leaf pages all have a high key (even the
    2994             :  * rightmost page has a conceptual positive infinity high key), but not a low
    2995             :  * key.  Their downlink in parent is a lower bound, which along with the high
    2996             :  * key is almost enough to detect every possible inconsistency.  A downlink
    2997             :  * separator key value won't always be available from parent, though, because
    2998             :  * the first items of internal pages are negative infinity items, truncated
    2999             :  * down to zero attributes during internal page splits.  While it's true that
    3000             :  * bt_child_check() and the high key check can detect most imaginable key
    3001             :  * space problems, there are remaining problems it won't detect with non-pivot
    3002             :  * tuples in cousin leaf pages.  Starting a search from the root for every
    3003             :  * existing leaf tuple detects small inconsistencies in upper levels of the
    3004             :  * tree that cannot be detected any other way.  (Besides all this, this is
    3005             :  * probably also useful as a direct test of the code used by index scans
    3006             :  * themselves.)
    3007             :  */
    3008             : static bool
    3009      402196 : bt_rootdescend(BtreeCheckState *state, IndexTuple itup)
    3010             : {
    3011             :     BTScanInsert key;
    3012             :     BTStack     stack;
    3013             :     Buffer      lbuf;
    3014             :     bool        exists;
    3015             : 
    3016      402196 :     key = _bt_mkscankey(state->rel, itup);
    3017             :     Assert(key->heapkeyspace && key->scantid != NULL);
    3018             : 
    3019             :     /*
    3020             :      * Search from root.
    3021             :      *
    3022             :      * Ideally, we would arrange to only move right within _bt_search() when
    3023             :      * an interrupted page split is detected (i.e. when the incomplete split
    3024             :      * bit is found to be set), but for now we accept the possibility that
    3025             :      * that could conceal an inconsistency.
    3026             :      */
    3027             :     Assert(state->readonly && state->rootdescend);
    3028      402196 :     exists = false;
    3029      402196 :     stack = _bt_search(state->rel, NULL, key, &lbuf, BT_READ);
    3030             : 
    3031      402196 :     if (BufferIsValid(lbuf))
    3032             :     {
    3033             :         BTInsertStateData insertstate;
    3034             :         OffsetNumber offnum;
    3035             :         Page        page;
    3036             : 
    3037      402196 :         insertstate.itup = itup;
    3038      402196 :         insertstate.itemsz = MAXALIGN(IndexTupleSize(itup));
    3039      402196 :         insertstate.itup_key = key;
    3040      402196 :         insertstate.postingoff = 0;
    3041      402196 :         insertstate.bounds_valid = false;
    3042      402196 :         insertstate.buf = lbuf;
    3043             : 
    3044             :         /* Get matching tuple on leaf page */
    3045      402196 :         offnum = _bt_binsrch_insert(state->rel, &insertstate);
    3046             :         /* Compare first >= matching item on leaf page, if any */
    3047      402196 :         page = BufferGetPage(lbuf);
    3048             :         /* Should match on first heap TID when tuple has a posting list */
    3049      402196 :         if (offnum <= PageGetMaxOffsetNumber(page) &&
    3050      804392 :             insertstate.postingoff <= 0 &&
    3051      402196 :             _bt_compare(state->rel, key, page, offnum) == 0)
    3052      402196 :             exists = true;
    3053      402196 :         _bt_relbuf(state->rel, lbuf);
    3054             :     }
    3055             : 
    3056      402196 :     _bt_freestack(stack);
    3057      402196 :     pfree(key);
    3058             : 
    3059      402196 :     return exists;
    3060             : }
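
A minimal sketch of how the readonly, rootdescend caller uses the result (condensed; "topaque" and "itup" are assumed to be the target page's opaque data and the current non-pivot tuple in the per-page check): every live leaf tuple must be refindable by a fresh descent from the root, and corruption is reported otherwise.

	if (state->rootdescend && P_ISLEAF(topaque) &&
		!bt_rootdescend(state, itup))
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("could not find tuple using search from root page in index \"%s\"",
						RelationGetRelationName(state->rel))));
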
    3061             : 
    3062             : /*
    3063             :  * Is particular offset within page (whose special state is passed by caller)
    3064             :  * the page negative-infinity item?
    3065             :  *
    3066             :  * As noted in comments above _bt_compare(), there is special handling of the
    3067             :  * first data item as a "negative infinity" item.  The hard-coding within
    3068             :  * _bt_compare() makes comparing this item for the purposes of verification
    3069             :  * pointless at best, since the IndexTuple only contains a valid TID (a
    3070             :  * reference TID to child page).
    3071             :  */
    3072             : static inline bool
    3073     5259054 : offset_is_negative_infinity(BTPageOpaque opaque, OffsetNumber offset)
    3074             : {
    3075             :     /*
    3076             :      * For internal pages only, the first item after high key, if any, is
    3077             :      * negative infinity item.  Internal pages always have a negative infinity
    3078             :      * item, whereas leaf pages never have one.  This implies that negative
    3079             :      * infinity item is either first or second line item, or there is none
    3080             :      * within page.
    3081             :      *
    3082             :      * Negative infinity items are a special case among pivot tuples.  They
    3083             :      * always have zero attributes, while all other pivot tuples always have
    3084             :      * nkeyatts attributes.
    3085             :      *
    3086             :      * Right-most pages don't have a high key, but could be said to
    3087             :      * conceptually have a "positive infinity" high key.  Thus, there is a
    3088             :      * symmetry between down link items in parent pages, and high keys in
    3089             :      * children.  Together, they represent the part of the key space that
    3090             :      * belongs to each page in the index.  For example, all children of the
    3091             :      * root page will have negative infinity as a lower bound from root
    3092             :      * negative infinity downlink, and positive infinity as an upper bound
    3093             :      * (implicitly, from "imaginary" positive infinity high key in root).
    3094             :      */
    3095     5259054 :     return !P_ISLEAF(opaque) && offset == P_FIRSTDATAKEY(opaque);
    3096             : }
    3097             : 
    3098             : /*
    3099             :  * Does the invariant hold that the key is strictly less than a given upper
    3100             :  * bound offset item?
    3101             :  *
    3102             :  * Verifies line pointer on behalf of caller.
    3103             :  *
    3104             :  * If this function returns false, convention is that caller throws error due
    3105             :  * to corruption.
    3106             :  */
    3107             : static inline bool
    3108     4040168 : invariant_l_offset(BtreeCheckState *state, BTScanInsert key,
    3109             :                    OffsetNumber upperbound)
    3110             : {
    3111             :     ItemId      itemid;
    3112             :     int32       cmp;
    3113             : 
    3114             :     Assert(!key->nextkey && key->backward);
    3115             : 
    3116             :     /* Verify line pointer before checking tuple */
    3117     4040168 :     itemid = PageGetItemIdCareful(state, state->targetblock, state->target,
    3118             :                                   upperbound);
    3119             :     /* pg_upgrade'd indexes may legally have equal sibling tuples */
    3120     4040168 :     if (!key->heapkeyspace)
    3121           0 :         return invariant_leq_offset(state, key, upperbound);
    3122             : 
    3123     4040168 :     cmp = _bt_compare(state->rel, key, state->target, upperbound);
    3124             : 
    3125             :     /*
    3126             :      * _bt_compare() is capable of determining that a scankey with a
    3127             :      * filled-out attribute is greater than pivot tuples where the comparison
    3128             :      * is resolved at a truncated attribute (value of attribute in pivot is
    3129             :      * minus infinity).  However, it is not capable of determining that a
    3130             :      * scankey is _less than_ a tuple on the basis of a comparison resolved at
    3131             :      * _scankey_ minus infinity attribute.  Complete an extra step to simulate
    3132             :      * having minus infinity values for omitted scankey attribute(s).
    3133             :      */
    3134     4040168 :     if (cmp == 0)
    3135             :     {
    3136             :         BTPageOpaque topaque;
    3137             :         IndexTuple  ritup;
    3138             :         int         uppnkeyatts;
    3139             :         ItemPointer rheaptid;
    3140             :         bool        nonpivot;
    3141             : 
    3142           0 :         ritup = (IndexTuple) PageGetItem(state->target, itemid);
    3143           0 :         topaque = BTPageGetOpaque(state->target);
    3144           0 :         nonpivot = P_ISLEAF(topaque) && upperbound >= P_FIRSTDATAKEY(topaque);
    3145             : 
    3146             :         /* Get number of keys + heap TID for item to the right */
    3147           0 :         uppnkeyatts = BTreeTupleGetNKeyAtts(ritup, state->rel);
    3148           0 :         rheaptid = BTreeTupleGetHeapTIDCareful(state, ritup, nonpivot);
    3149             : 
    3150             :         /* Heap TID is tiebreaker key attribute */
    3151           0 :         if (key->keysz == uppnkeyatts)
    3152           0 :             return key->scantid == NULL && rheaptid != NULL;
    3153             : 
    3154           0 :         return key->keysz < uppnkeyatts;
    3155             :     }
    3156             : 
    3157     4040168 :     return cmp < 0;
    3158             : }
    3159             : 
    3160             : /*
    3161             :  * Does the invariant hold that the key is less than or equal to a given upper
    3162             :  * bound offset item?
    3163             :  *
    3164             :  * Caller should have verified that upperbound's line pointer is consistent
    3165             :  * using PageGetItemIdCareful() call.
    3166             :  *
    3167             :  * If this function returns false, convention is that caller throws error due
    3168             :  * to corruption.
    3169             :  */
    3170             : static inline bool
    3171     3698282 : invariant_leq_offset(BtreeCheckState *state, BTScanInsert key,
    3172             :                      OffsetNumber upperbound)
    3173             : {
    3174             :     int32       cmp;
    3175             : 
    3176             :     Assert(!key->nextkey && key->backward);
    3177             : 
    3178     3698282 :     cmp = _bt_compare(state->rel, key, state->target, upperbound);
    3179             : 
    3180     3698282 :     return cmp <= 0;
    3181             : }
    3182             : 
    3183             : /*
    3184             :  * Does the invariant hold that the key is strictly greater than a given lower
    3185             :  * bound offset item?
    3186             :  *
    3187             :  * Caller should have verified that lowerbound's line pointer is consistent
    3188             :  * using PageGetItemIdCareful() call.
    3189             :  *
    3190             :  * If this function returns false, convention is that caller throws error due
    3191             :  * to corruption.
    3192             :  */
    3193             : static inline bool
    3194       13228 : invariant_g_offset(BtreeCheckState *state, BTScanInsert key,
    3195             :                    OffsetNumber lowerbound)
    3196             : {
    3197             :     int32       cmp;
    3198             : 
    3199             :     Assert(!key->nextkey && key->backward);
    3200             : 
    3201       13228 :     cmp = _bt_compare(state->rel, key, state->target, lowerbound);
    3202             : 
    3203             :     /* pg_upgrade'd indexes may legally have equal sibling tuples */
    3204       13228 :     if (!key->heapkeyspace)
    3205           0 :         return cmp >= 0;
    3206             : 
    3207             :     /*
    3208             :      * No need to consider the possibility that scankey has attributes that we
    3209             :      * need to force to be interpreted as negative infinity.  _bt_compare() is
    3210             :      * able to determine that scankey is greater than negative infinity.  The
    3211             :      * distinction between "==" and "<" isn't interesting here, since
    3212             :      * corruption is indicated either way.
    3213             :      */
    3214       13228 :     return cmp > 0;
    3215             : }
    3216             : 
    3217             : /*
    3218             :  * Does the invariant hold that the key is strictly less than a given upper
    3219             :  * bound offset item, with the offset relating to a caller-supplied page that
    3220             :  * is not the current target page?
    3221             :  *
    3222             :  * Caller's non-target page is a child page of the target, checked as part of
    3223             :  * checking a property of the target page (i.e. the key comes from the
    3224             :  * target).  Verifies line pointer on behalf of caller.
    3225             :  *
    3226             :  * If this function returns false, convention is that caller throws error due
    3227             :  * to corruption.
    3228             :  */
    3229             : static inline bool
    3230     1196976 : invariant_l_nontarget_offset(BtreeCheckState *state, BTScanInsert key,
    3231             :                              BlockNumber nontargetblock, Page nontarget,
    3232             :                              OffsetNumber upperbound)
    3233             : {
    3234             :     ItemId      itemid;
    3235             :     int32       cmp;
    3236             : 
    3237             :     Assert(!key->nextkey && key->backward);
    3238             : 
    3239             :     /* Verify line pointer before checking tuple */
    3240     1196976 :     itemid = PageGetItemIdCareful(state, nontargetblock, nontarget,
    3241             :                                   upperbound);
    3242     1196976 :     cmp = _bt_compare(state->rel, key, nontarget, upperbound);
    3243             : 
    3244             :     /* pg_upgrade'd indexes may legally have equal sibling tuples */
    3245     1196976 :     if (!key->heapkeyspace)
    3246           0 :         return cmp <= 0;
    3247             : 
    3248             :     /* See invariant_l_offset() for an explanation of this extra step */
    3249     1196976 :     if (cmp == 0)
    3250             :     {
    3251             :         IndexTuple  child;
    3252             :         int         uppnkeyatts;
    3253             :         ItemPointer childheaptid;
    3254             :         BTPageOpaque copaque;
    3255             :         bool        nonpivot;
    3256             : 
    3257        3720 :         child = (IndexTuple) PageGetItem(nontarget, itemid);
    3258        3720 :         copaque = BTPageGetOpaque(nontarget);
    3259        3720 :         nonpivot = P_ISLEAF(copaque) && upperbound >= P_FIRSTDATAKEY(copaque);
    3260             : 
    3261             :         /* Get number of keys + heap TID for child/non-target item */
    3262        3720 :         uppnkeyatts = BTreeTupleGetNKeyAtts(child, state->rel);
    3263        3720 :         childheaptid = BTreeTupleGetHeapTIDCareful(state, child, nonpivot);
    3264             : 
    3265             :         /* Heap TID is tiebreaker key attribute */
    3266        3720 :         if (key->keysz == uppnkeyatts)
    3267        3720 :             return key->scantid == NULL && childheaptid != NULL;
    3268             : 
    3269           0 :         return key->keysz < uppnkeyatts;
    3270             :     }
    3271             : 
    3272     1193256 :     return cmp < 0;
    3273             : }
    3274             : 
    3275             : /*
    3276             :  * Given a block number of a B-Tree page, return page in palloc()'d memory.
    3277             :  * While at it, perform some basic checks of the page.
    3278             :  *
    3279             :  * There is never an attempt to get a consistent view of multiple pages using
    3280             :  * multiple concurrent buffer locks; in general, we only acquire a single pin
    3281             :  * and buffer lock at a time, which is often all that the nbtree code requires.
    3282             :  * (Actually, bt_recheck_sibling_links couples buffer locks, which is the only
    3283             :  * exception to this general rule.)
    3284             :  *
    3285             :  * Operating on a copy of the page is useful because it prevents control from
    3286             :  * getting stuck in an uninterruptible state when an underlying operator class
    3287             :  * misbehaves.
    3288             :  */
    3289             : static Page
    3290       43164 : palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum)
    3291             : {
    3292             :     Buffer      buffer;
    3293             :     Page        page;
    3294             :     BTPageOpaque opaque;
    3295             :     OffsetNumber maxoffset;
    3296             : 
    3297       43164 :     page = palloc(BLCKSZ);
    3298             : 
    3299             :     /*
    3300             :      * We copy the page into local storage to avoid holding pin on the buffer
    3301             :      * longer than we must.
    3302             :      */
    3303       43164 :     buffer = ReadBufferExtended(state->rel, MAIN_FORKNUM, blocknum, RBM_NORMAL,
    3304             :                                 state->checkstrategy);
    3305       43140 :     LockBuffer(buffer, BT_READ);
    3306             : 
    3307             :     /*
    3308             :      * Perform the same basic sanity checking that nbtree itself performs for
    3309             :      * every page:
    3310             :      */
    3311       43140 :     _bt_checkpage(state->rel, buffer);
    3312             : 
    3313             :     /* Only use copy of page in palloc()'d memory */
    3314       43140 :     memcpy(page, BufferGetPage(buffer), BLCKSZ);
    3315       43140 :     UnlockReleaseBuffer(buffer);
    3316             : 
    3317       43140 :     opaque = BTPageGetOpaque(page);
    3318             : 
    3319       43140 :     if (P_ISMETA(opaque) && blocknum != BTREE_METAPAGE)
    3320           0 :         ereport(ERROR,
    3321             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    3322             :                  errmsg("invalid meta page found at block %u in index \"%s\"",
    3323             :                         blocknum, RelationGetRelationName(state->rel))));
    3324             : 
    3325             :     /* Check page from block that ought to be meta page */
    3326       43140 :     if (blocknum == BTREE_METAPAGE)
    3327             :     {
    3328        7944 :         BTMetaPageData *metad = BTPageGetMeta(page);
    3329             : 
    3330        7944 :         if (!P_ISMETA(opaque) ||
    3331        7944 :             metad->btm_magic != BTREE_MAGIC)
    3332           0 :             ereport(ERROR,
    3333             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    3334             :                      errmsg("index \"%s\" meta page is corrupt",
    3335             :                             RelationGetRelationName(state->rel))));
    3336             : 
    3337        7944 :         if (metad->btm_version < BTREE_MIN_VERSION ||
    3338        7944 :             metad->btm_version > BTREE_VERSION)
    3339           0 :             ereport(ERROR,
    3340             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    3341             :                      errmsg("version mismatch in index \"%s\": file version %d, "
    3342             :                             "current version %d, minimum supported version %d",
    3343             :                             RelationGetRelationName(state->rel),
    3344             :                             metad->btm_version, BTREE_VERSION,
    3345             :                             BTREE_MIN_VERSION)));
    3346             : 
    3347             :         /* Finished with metapage checks */
    3348        7944 :         return page;
    3349             :     }
    3350             : 
    3351             :     /*
    3352             :      * Deleted pages that still use the old 32-bit XID representation have no
    3353             :      * sane "level" field because they type pun the field, but all other pages
    3354             :      * (including pages deleted on Postgres 14+) have a valid value.
    3355             :      */
    3356       35196 :     if (!P_ISDELETED(opaque) || P_HAS_FULLXID(opaque))
    3357             :     {
    3358             :         /* Okay, no reason not to trust btpo_level field from page */
    3359             : 
    3360       35196 :         if (P_ISLEAF(opaque) && opaque->btpo_level != 0)
    3361           0 :             ereport(ERROR,
    3362             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    3363             :                      errmsg_internal("invalid leaf page level %u for block %u in index \"%s\"",
    3364             :                                      opaque->btpo_level, blocknum,
    3365             :                                      RelationGetRelationName(state->rel))));
    3366             : 
    3367       35196 :         if (!P_ISLEAF(opaque) && opaque->btpo_level == 0)
    3368           0 :             ereport(ERROR,
    3369             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    3370             :                      errmsg_internal("invalid internal page level 0 for block %u in index \"%s\"",
    3371             :                                      blocknum,
    3372             :                                      RelationGetRelationName(state->rel))));
    3373             :     }
    3374             : 
    3375             :     /*
    3376             :      * Sanity checks for number of items on page.
    3377             :      *
    3378             :      * As noted at the beginning of _bt_binsrch(), an internal page must have
    3379             :      * children, since there must always be a negative infinity downlink
    3380             :      * (there may also be a highkey).  In the case of non-rightmost leaf
    3381             :      * pages, there must be at least a highkey.  The exceptions are deleted
    3382             :      * pages, which contain no items.
    3383             :      *
    3384             :      * This is correct when pages are half-dead, since internal pages are
    3385             :      * never half-dead, and leaf pages must have a high key when half-dead
    3386             :      * (the rightmost page can never be deleted).  It's also correct with
    3387             :      * fully deleted pages: _bt_unlink_halfdead_page() doesn't change anything
    3388             :      * about the target page other than setting the page as fully dead, and
    3389             :      * setting its xact field.  In particular, it doesn't change the sibling
    3390             :      * links in the deletion target itself, since they're required when index
    3391             :      * scans land on the deletion target, and then need to move right (or need
    3392             :      * to move left, in the case of backward index scans).
    3393             :      */
    3394       35196 :     maxoffset = PageGetMaxOffsetNumber(page);
    3395       35196 :     if (maxoffset > MaxIndexTuplesPerPage)
    3396           0 :         ereport(ERROR,
    3397             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    3398             :                  errmsg("Number of items on block %u of index \"%s\" exceeds MaxIndexTuplesPerPage (%u)",
    3399             :                         blocknum, RelationGetRelationName(state->rel),
    3400             :                         MaxIndexTuplesPerPage)));
    3401             : 
    3402       35196 :     if (!P_ISLEAF(opaque) && !P_ISDELETED(opaque) && maxoffset < P_FIRSTDATAKEY(opaque))
    3403           0 :         ereport(ERROR,
    3404             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    3405             :                  errmsg("internal block %u in index \"%s\" lacks high key and/or at least one downlink",
    3406             :                         blocknum, RelationGetRelationName(state->rel))));
    3407             : 
    3408       35196 :     if (P_ISLEAF(opaque) && !P_ISDELETED(opaque) && !P_RIGHTMOST(opaque) && maxoffset < P_HIKEY)
    3409           0 :         ereport(ERROR,
    3410             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    3411             :                  errmsg("non-rightmost leaf block %u in index \"%s\" lacks high key item",
    3412             :                         blocknum, RelationGetRelationName(state->rel))));
    3413             : 
    3414             :     /*
    3415             :      * In general, internal pages are never marked half-dead, except on
    3416             :      * versions of Postgres prior to 9.4, where it can be a valid transient
    3417             :      * state.  This state is nonetheless treated as corruption by VACUUM
    3418             :      * from version 9.4 on, so do the same here.  See _bt_pagedel() for full
    3419             :      * details.
    3420             :      */
    3421       35196 :     if (!P_ISLEAF(opaque) && P_ISHALFDEAD(opaque))
    3422           0 :         ereport(ERROR,
    3423             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    3424             :                  errmsg("internal page block %u in index \"%s\" is half-dead",
    3425             :                         blocknum, RelationGetRelationName(state->rel)),
    3426             :                  errhint("This can be caused by an interrupted VACUUM in version 9.3 or older, before upgrade. Please REINDEX it.")));
    3427             : 
    3428             :     /*
    3429             :      * Check that internal pages have no garbage items, and that no page has
    3430             :      * an invalid combination of deletion-related page level flags
    3431             :      */
    3432       35196 :     if (!P_ISLEAF(opaque) && P_HAS_GARBAGE(opaque))
    3433           0 :         ereport(ERROR,
    3434             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    3435             :                  errmsg_internal("internal page block %u in index \"%s\" has garbage items",
    3436             :                                  blocknum, RelationGetRelationName(state->rel))));
    3437             : 
    3438       35196 :     if (P_HAS_FULLXID(opaque) && !P_ISDELETED(opaque))
    3439           0 :         ereport(ERROR,
    3440             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    3441             :                  errmsg_internal("full transaction id page flag appears in non-deleted block %u in index \"%s\"",
    3442             :                                  blocknum, RelationGetRelationName(state->rel))));
    3443             : 
    3444       35196 :     if (P_ISDELETED(opaque) && P_ISHALFDEAD(opaque))
    3445           0 :         ereport(ERROR,
    3446             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    3447             :                  errmsg_internal("deleted page block %u in index \"%s\" is half-dead",
    3448             :                                  blocknum, RelationGetRelationName(state->rel))));
    3449             : 
    3450       35196 :     return page;
    3451             : }
    3452             : 
    3453             : /*
    3454             :  * _bt_mkscankey() wrapper that automatically prevents insertion scankey from
    3455             :  * being considered greater than the pivot tuple that its values originated
    3456             :  * from (or some other identical pivot tuple) in the common case where there
    3457             :  * are truncated/minus infinity attributes.  Without this extra step, there
    3458             :  * are forms of corruption that amcheck could theoretically fail to report.
    3459             :  *
    3460             :  * For example, invariant_g_offset() might miss a cross-page invariant failure
    3461             :  * on an internal level if the scankey built from the first item on the
    3462             :  * target's right sibling page happened to be equal to (not greater than) the
    3463             :  * last item on target page.  The !backward tiebreaker in _bt_compare() might
    3464             :  * otherwise cause amcheck to assume (rather than actually verify) that the
    3465             :  * scankey is greater.
    3466             :  */
    3467             : static inline BTScanInsert
    3468     4070466 : bt_mkscankey_pivotsearch(Relation rel, IndexTuple itup)
    3469             : {
    3470             :     BTScanInsert skey;
    3471             : 
    3472     4070466 :     skey = _bt_mkscankey(rel, itup);
    3473     4070466 :     skey->backward = true;
    3474             : 
    3475     4070466 :     return skey;
    3476             : }
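
/*
 * Illustrative sketch, not part of verify_nbtree.c: roughly how a cross-page
 * check such as the one described above might consume this wrapper.  The
 * helper name and its arguments are hypothetical; only
 * bt_mkscankey_pivotsearch() and _bt_compare() come from the surrounding
 * code.
 */
static bool
right_sibling_first_item_greater(BtreeCheckState *state,
                                 IndexTuple rightfirst,
                                 OffsetNumber target_lastoffset)
{
    BTScanInsert rightkey;
    int32       cmp;

    /* Build an insertion scankey from the sibling's first data item ... */
    rightkey = bt_mkscankey_pivotsearch(state->rel, rightfirst);

    /*
     * ... and compare it against the last item on the target page.  With
     * skey->backward set, truncated (minus infinity) attributes in
     * rightfirst cannot make the scankey spuriously compare as greater than
     * an equal pivot tuple, so a cross-page ordering violation is reported
     * rather than assumed away.
     */
    cmp = _bt_compare(state->rel, rightkey, state->target, target_lastoffset);
    pfree(rightkey);

    return cmp > 0;
}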
    3477             : 
    3478             : /*
    3479             :  * PageGetItemId() wrapper that validates returned line pointer.
    3480             :  *
    3481             :  * Buffer page/page item access macros generally trust that line pointers are
    3482             :  * not corrupt, which might cause problems for verification itself.  For
    3483             :  * example, there is no bounds checking in PageGetItem().  Passing it a
    3484             :  * corrupt line pointer can cause it to return a tuple/pointer that is unsafe
    3485             :  * to dereference.
    3486             :  *
    3487             :  * Validating line pointers before tuples avoids undefined behavior and
    3488             :  * assertion failures with corrupt indexes, making the verification process
    3489             :  * more robust and predictable.
    3490             :  */
    3491             : static ItemId
    3492     9341716 : PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block, Page page,
    3493             :                      OffsetNumber offset)
    3494             : {
    3495     9341716 :     ItemId      itemid = PageGetItemId(page, offset);
    3496             : 
    3497     9341716 :     if (ItemIdGetOffset(itemid) + ItemIdGetLength(itemid) >
    3498             :         BLCKSZ - MAXALIGN(sizeof(BTPageOpaqueData)))
    3499           0 :         ereport(ERROR,
    3500             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    3501             :                  errmsg("line pointer points past end of tuple space in index \"%s\"",
    3502             :                         RelationGetRelationName(state->rel)),
    3503             :                  errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
    3504             :                                     block, offset, ItemIdGetOffset(itemid),
    3505             :                                     ItemIdGetLength(itemid),
    3506             :                                     ItemIdGetFlags(itemid))));
    3507             : 
    3508             :     /*
    3509             :      * Verify that line pointer isn't LP_REDIRECT or LP_UNUSED, since nbtree
    3510             :      * never uses either.  Verify that line pointer has storage, too, since
    3511             :      * even LP_DEAD items should have storage within nbtree.
    3512             :      */
    3513     9341716 :     if (ItemIdIsRedirected(itemid) || !ItemIdIsUsed(itemid) ||
    3514     9341716 :         ItemIdGetLength(itemid) == 0)
    3515           0 :         ereport(ERROR,
    3516             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    3517             :                  errmsg("invalid line pointer storage in index \"%s\"",
    3518             :                         RelationGetRelationName(state->rel)),
    3519             :                  errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
    3520             :                                     block, offset, ItemIdGetOffset(itemid),
    3521             :                                     ItemIdGetLength(itemid),
    3522             :                                     ItemIdGetFlags(itemid))));
    3523             : 
    3524     9341716 :     return itemid;
    3525             : }
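
/*
 * Illustrative sketch, not part of verify_nbtree.c: the calling pattern this
 * wrapper is designed for.  The walk_page_items() helper is hypothetical;
 * every line pointer is validated by PageGetItemIdCareful() before the
 * corresponding tuple is fetched with PageGetItem(), so a corrupt
 * lp_off/lp_len raises ERRCODE_INDEX_CORRUPTED instead of causing an
 * out-of-bounds read.
 */
static void
walk_page_items(BtreeCheckState *state, BlockNumber blocknum, Page page)
{
    BTPageOpaque opaque = BTPageGetOpaque(page);
    OffsetNumber maxoffset = PageGetMaxOffsetNumber(page);
    OffsetNumber offset;

    for (offset = P_FIRSTDATAKEY(opaque);
         offset <= maxoffset;
         offset = OffsetNumberNext(offset))
    {
        ItemId      itemid;
        IndexTuple  itup;

        /* Line pointer is checked for sane offset, length, and flags */
        itemid = PageGetItemIdCareful(state, blocknum, page, offset);

        /* Only now is it safe to dereference the item */
        itup = (IndexTuple) PageGetItem(page, itemid);

        /* ... per-tuple verification would go here ... */
        (void) itup;
    }
}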
    3526             : 
    3527             : /*
    3528             :  * BTreeTupleGetHeapTID() wrapper that enforces that a heap TID is present in
    3529             :  * cases where that is mandatory (i.e. for non-pivot tuples)
    3530             :  */
    3531             : static inline ItemPointer
    3532        3720 : BTreeTupleGetHeapTIDCareful(BtreeCheckState *state, IndexTuple itup,
    3533             :                             bool nonpivot)
    3534             : {
    3535             :     ItemPointer htid;
    3536             : 
    3537             :     /*
    3538             :      * Caller determines whether this is supposed to be a pivot or non-pivot
    3539             :      * tuple using page type and item offset number.  Verify that tuple
    3540             :      * metadata agrees with this.
    3541             :      */
    3542             :     Assert(state->heapkeyspace);
    3543        3720 :     if (BTreeTupleIsPivot(itup) && nonpivot)
    3544           0 :         ereport(ERROR,
    3545             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    3546             :                  errmsg_internal("block %u or its right sibling block or child block in index \"%s\" has unexpected pivot tuple",
    3547             :                                  state->targetblock,
    3548             :                                  RelationGetRelationName(state->rel))));
    3549             : 
    3550        3720 :     if (!BTreeTupleIsPivot(itup) && !nonpivot)
    3551           0 :         ereport(ERROR,
    3552             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    3553             :                  errmsg_internal("block %u or its right sibling block or child block in index \"%s\" has unexpected non-pivot tuple",
    3554             :                                  state->targetblock,
    3555             :                                  RelationGetRelationName(state->rel))));
    3556             : 
    3557        3720 :     htid = BTreeTupleGetHeapTID(itup);
    3558        3720 :     if (!ItemPointerIsValid(htid) && nonpivot)
    3559           0 :         ereport(ERROR,
    3560             :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    3561             :                  errmsg("block %u or its right sibling block or child block in index \"%s\" contains non-pivot tuple that lacks a heap TID",
    3562             :                         state->targetblock,
    3563             :                         RelationGetRelationName(state->rel))));
    3564             : 
    3565        3720 :     return htid;
    3566             : }
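
/*
 * Illustrative sketch, not part of verify_nbtree.c: how a caller might derive
 * the "nonpivot" argument from page type and item offset, as the comment
 * above describes.  The expect_heap_tid() helper is hypothetical.
 */
static ItemPointer
expect_heap_tid(BtreeCheckState *state, Page page, OffsetNumber offset,
                IndexTuple itup)
{
    BTPageOpaque opaque = BTPageGetOpaque(page);
    bool        nonpivot;

    /*
     * On a leaf page, every item at or beyond P_FIRSTDATAKEY() is a data
     * (non-pivot) tuple and must carry a heap TID; internal-page items and
     * high keys are pivot tuples, which may not.
     */
    nonpivot = P_ISLEAF(opaque) && offset >= P_FIRSTDATAKEY(opaque);

    return BTreeTupleGetHeapTIDCareful(state, itup, nonpivot);
}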
    3567             : 
    3568             : /*
    3569             :  * Return the "pointed to" TID for itup, which is used to generate a
    3570             :  * descriptive error message.  itup must be a "data item" tuple (it wouldn't
    3571             :  * make much sense to call here with a high key tuple, since there won't be a
    3572             :  * valid downlink/block number to display).
    3573             :  *
    3574             :  * Returns either a heap TID (which will be the first heap TID in posting list
    3575             :  * if itup is posting list tuple), or a TID that contains downlink block
    3576             :  * number, plus some encoded metadata (e.g., the number of attributes present
    3577             :  * in itup).
    3578             :  */
    3579             : static inline ItemPointer
    3580          12 : BTreeTupleGetPointsToTID(IndexTuple itup)
    3581             : {
    3582             :     /*
    3583             :      * Rely on the assumption that !heapkeyspace internal page data items will
    3584             :      * correctly return TID with downlink here -- BTreeTupleGetHeapTID() won't
    3585             :      * recognize it as a pivot tuple, but everything still works out because
    3586             :      * the t_tid field is still returned
    3587             :      */
    3588          12 :     if (!BTreeTupleIsPivot(itup))
    3589           8 :         return BTreeTupleGetHeapTID(itup);
    3590             : 
    3591             :     /* Pivot tuple returns TID with downlink block (heapkeyspace variant) */
    3592           4 :     return &itup->t_tid;
    3593             : }

Generated by: LCOV version 1.16