LCOV - code coverage report
Current view: top level - src/backend/access/nbtree - nbtinsert.c
Test: PostgreSQL 19devel
Date: 2025-11-20 05:17:59
Coverage:  Lines: 676 of 764 hit (88.5 %)  Functions: 15 of 16 hit (93.8 %)

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * nbtinsert.c
       4             :  *    Item insertion in Lehman and Yao btrees for Postgres.
       5             :  *
       6             :  * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/access/nbtree/nbtinsert.c
      12             :  *
      13             :  *-------------------------------------------------------------------------
      14             :  */
      15             : 
      16             : #include "postgres.h"
      17             : 
      18             : #include "access/nbtree.h"
      19             : #include "access/nbtxlog.h"
      20             : #include "access/tableam.h"
      21             : #include "access/transam.h"
      22             : #include "access/xloginsert.h"
      23             : #include "common/int.h"
      24             : #include "common/pg_prng.h"
      25             : #include "lib/qunique.h"
      26             : #include "miscadmin.h"
      27             : #include "storage/lmgr.h"
      28             : #include "storage/predicate.h"
      29             : 
      30             : /* Minimum tree height for application of fastpath optimization */
      31             : #define BTREE_FASTPATH_MIN_LEVEL    2
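                      : /*
                      :  * (Explanatory note, not part of the original file: the rightmost-leaf
                      :  * cache behind the fastpath is only maintained once the tree is at
                      :  * least this tall.  For flatter trees a fresh descent from the root is
                      :  * cheap anyway, so the cache isn't worth the bookkeeping.)
                      :  */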
      32             : 
      33             : 
      34             : static BTStack _bt_search_insert(Relation rel, Relation heaprel,
      35             :                                  BTInsertState insertstate);
      36             : static TransactionId _bt_check_unique(Relation rel, BTInsertState insertstate,
      37             :                                       Relation heapRel,
      38             :                                       IndexUniqueCheck checkUnique, bool *is_unique,
      39             :                                       uint32 *speculativeToken);
      40             : static OffsetNumber _bt_findinsertloc(Relation rel,
      41             :                                       BTInsertState insertstate,
      42             :                                       bool checkingunique,
      43             :                                       bool indexUnchanged,
      44             :                                       BTStack stack,
      45             :                                       Relation heapRel);
      46             : static void _bt_stepright(Relation rel, Relation heaprel,
      47             :                           BTInsertState insertstate, BTStack stack);
      48             : static void _bt_insertonpg(Relation rel, Relation heaprel, BTScanInsert itup_key,
      49             :                            Buffer buf,
      50             :                            Buffer cbuf,
      51             :                            BTStack stack,
      52             :                            IndexTuple itup,
      53             :                            Size itemsz,
      54             :                            OffsetNumber newitemoff,
      55             :                            int postingoff,
      56             :                            bool split_only_page);
      57             : static Buffer _bt_split(Relation rel, Relation heaprel, BTScanInsert itup_key,
      58             :                         Buffer buf, Buffer cbuf, OffsetNumber newitemoff,
      59             :                         Size newitemsz, IndexTuple newitem, IndexTuple orignewitem,
      60             :                         IndexTuple nposting, uint16 postingoff);
      61             : static void _bt_insert_parent(Relation rel, Relation heaprel, Buffer buf,
      62             :                               Buffer rbuf, BTStack stack, bool isroot, bool isonly);
      63             : static Buffer _bt_newlevel(Relation rel, Relation heaprel, Buffer lbuf, Buffer rbuf);
      64             : static inline bool _bt_pgaddtup(Page page, Size itemsize, const IndexTupleData *itup,
      65             :                                 OffsetNumber itup_off, bool newfirstdataitem);
      66             : static void _bt_delete_or_dedup_one_page(Relation rel, Relation heapRel,
      67             :                                          BTInsertState insertstate,
      68             :                                          bool simpleonly, bool checkingunique,
      69             :                                          bool uniquedup, bool indexUnchanged);
      70             : static void _bt_simpledel_pass(Relation rel, Buffer buffer, Relation heapRel,
      71             :                                OffsetNumber *deletable, int ndeletable,
      72             :                                IndexTuple newitem, OffsetNumber minoff,
      73             :                                OffsetNumber maxoff);
      74             : static BlockNumber *_bt_deadblocks(Page page, OffsetNumber *deletable,
      75             :                                    int ndeletable, IndexTuple newitem,
      76             :                                    int *nblocks);
      77             : static inline int _bt_blk_cmp(const void *arg1, const void *arg2);
      78             : 
      79             : /*
      80             :  *  _bt_doinsert() -- Handle insertion of a single index tuple in the tree.
      81             :  *
      82             :  *      This routine is called by the public interface routine, btinsert.
      83             :  *      By here, itup is filled in, including the TID.
      84             :  *
      85             :  *      If checkUnique is UNIQUE_CHECK_NO or UNIQUE_CHECK_PARTIAL, this
      86             :  *      will allow duplicates.  Otherwise (UNIQUE_CHECK_YES or
       87             :  *      UNIQUE_CHECK_EXISTING) it will throw an error for a duplicate.
      88             :  *      For UNIQUE_CHECK_EXISTING we merely run the duplicate check, and
      89             :  *      don't actually insert.
      90             :  *
       91             :  *      The indexUnchanged executor hint indicates whether itup is from an
      92             :  *      UPDATE that didn't logically change the indexed value, but
      93             :  *      must nevertheless have a new entry to point to a successor
      94             :  *      version.
      95             :  *
      96             :  *      The result value is only significant for UNIQUE_CHECK_PARTIAL:
      97             :  *      it must be true if the entry is known unique, else false.
      98             :  *      (In the current implementation we'll also return true after a
      99             :  *      successful UNIQUE_CHECK_YES or UNIQUE_CHECK_EXISTING call, but
     100             :  *      that's just a coding artifact.)
     101             :  */
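                      : /*
                      :  * Summary of the checkUnique modes described above (explanatory note,
                      :  * not part of the original file):
                      :  *
                      :  *   UNIQUE_CHECK_NO       -- insert; duplicates allowed; result unused
                      :  *   UNIQUE_CHECK_YES      -- insert; error on a live duplicate
                      :  *   UNIQUE_CHECK_PARTIAL  -- insert; return false on possible duplicate
                      :  *   UNIQUE_CHECK_EXISTING -- recheck an existing entry only; no insert
                      :  */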
     102             : bool
     103     7300360 : _bt_doinsert(Relation rel, IndexTuple itup,
     104             :              IndexUniqueCheck checkUnique, bool indexUnchanged,
     105             :              Relation heapRel)
     106             : {
     107     7300360 :     bool        is_unique = false;
     108             :     BTInsertStateData insertstate;
     109             :     BTScanInsert itup_key;
     110             :     BTStack     stack;
     111     7300360 :     bool        checkingunique = (checkUnique != UNIQUE_CHECK_NO);
     112             : 
     113             :     /* we need an insertion scan key to do our search, so build one */
     114     7300360 :     itup_key = _bt_mkscankey(rel, itup);
     115             : 
     116     7300360 :     if (checkingunique)
     117             :     {
     118     5242892 :         if (!itup_key->anynullkeys)
     119             :         {
     120             :             /* No (heapkeyspace) scantid until uniqueness established */
     121     5222718 :             itup_key->scantid = NULL;
     122             :         }
     123             :         else
     124             :         {
     125             :             /*
     126             :              * Scan key for new tuple contains NULL key values.  Bypass
     127             :              * checkingunique steps.  They are unnecessary because core code
     128             :              * considers NULL unequal to every value, including NULL.
     129             :              *
     130             :              * This optimization avoids O(N^2) behavior within the
     131             :              * _bt_findinsertloc() heapkeyspace path when a unique index has a
     132             :              * large number of "duplicates" with NULL key values.
     133             :              */
     134       20174 :             checkingunique = false;
     135             :             /* Tuple is unique in the sense that core code cares about */
     136             :             Assert(checkUnique != UNIQUE_CHECK_EXISTING);
     137       20174 :             is_unique = true;
     138             :         }
     139             :     }
     140             : 
     141             :     /*
     142             :      * Fill in the BTInsertState working area, to track the current page and
     143             :      * position within the page to insert on.
     144             :      *
     145             :      * Note that itemsz is passed down to lower level code that deals with
     146             :      * inserting the item.  It must be MAXALIGN()'d.  This ensures that space
     147             :      * accounting code consistently considers the alignment overhead that we
     148             :      * expect PageAddItem() will add later.  (Actually, index_form_tuple() is
     149             :      * already conservative about alignment, but we don't rely on that from
     150             :      * this distance.  Besides, preserving the "true" tuple size in index
     151             :      * tuple headers for the benefit of nbtsplitloc.c might happen someday.
     152             :      * Note that heapam does not MAXALIGN() each heap tuple's lp_len field.)
     153             :      */
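                      :     /*
                      :      * Worked example (explanatory note, not part of the original file):
                      :      * with 8-byte maximum alignment, a 20-byte index tuple is charged
                      :      * as MAXALIGN(20) == 24 bytes of page space, matching the space
                      :      * that PageAddItem() will actually consume for it later.
                      :      */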
     154     7300360 :     insertstate.itup = itup;
     155     7300360 :     insertstate.itemsz = MAXALIGN(IndexTupleSize(itup));
     156     7300360 :     insertstate.itup_key = itup_key;
     157     7300360 :     insertstate.bounds_valid = false;
     158     7300360 :     insertstate.buf = InvalidBuffer;
     159     7300360 :     insertstate.postingoff = 0;
     160             : 
     161     7300384 : search:
     162             : 
     163             :     /*
     164             :      * Find and lock the leaf page that the tuple should be added to by
     165             :      * searching from the root page.  insertstate.buf will hold a buffer that
     166             :      * is locked in exclusive mode afterwards.
     167             :      */
     168     7300384 :     stack = _bt_search_insert(rel, heapRel, &insertstate);
     169             : 
     170             :     /*
     171             :      * checkingunique inserts are not allowed to go ahead when two tuples with
     172             :      * equal key attribute values would be visible to new MVCC snapshots once
     173             :      * the xact commits.  Check for conflicts in the locked page/buffer (if
     174             :      * needed) here.
     175             :      *
     176             :      * It might be necessary to check a page to the right in _bt_check_unique,
     177             :      * though that should be very rare.  In practice the first page the value
     178             :      * could be on (with scantid omitted) is almost always also the only page
     179             :      * that a matching tuple might be found on.  This is due to the behavior
     180             :      * of _bt_findsplitloc with duplicate tuples -- a group of duplicates can
     181             :      * only be allowed to cross a page boundary when there is no candidate
     182             :      * leaf page split point that avoids it.  Also, _bt_check_unique can use
     183             :      * the leaf page high key to determine that there will be no duplicates on
     184             :      * the right sibling without actually visiting it (it uses the high key in
     185             :      * cases where the new item happens to belong at the far right of the leaf
     186             :      * page).
     187             :      *
     188             :      * NOTE: obviously, _bt_check_unique can only detect keys that are already
     189             :      * in the index; so it cannot defend against concurrent insertions of the
     190             :      * same key.  We protect against that by means of holding a write lock on
     191             :      * the first page the value could be on, with omitted/-inf value for the
     192             :      * implicit heap TID tiebreaker attribute.  Any other would-be inserter of
     193             :      * the same key must acquire a write lock on the same page, so only one
     194             :      * would-be inserter can be making the check at one time.  Furthermore,
     195             :      * once we are past the check we hold write locks continuously until we
     196             :      * have performed our insertion, so no later inserter can fail to see our
     197             :      * insertion.  (This requires some care in _bt_findinsertloc.)
     198             :      *
     199             :      * If we must wait for another xact, we release the lock while waiting,
     200             :      * and then must perform a new search.
     201             :      *
     202             :      * For a partial uniqueness check, we don't wait for the other xact. Just
     203             :      * let the tuple in and return false for possibly non-unique, or true for
     204             :      * definitely unique.
     205             :      */
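                      :     /*
                      :      * Concrete illustration (explanatory note, not part of the
                      :      * original file): two backends concurrently inserting key 7 into a
                      :      * unique index both descend, with scantid omitted, to the same
                      :      * leaf page (the first page 7 could be on), and both need its
                      :      * write lock.  One blocks until the other has finished its check
                      :      * and insertion, and then sees the other's tuple when it runs
                      :      * _bt_check_unique() itself.
                      :      */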
     206     7300384 :     if (checkingunique)
     207             :     {
     208             :         TransactionId xwait;
     209             :         uint32      speculativeToken;
     210             : 
     211     5222742 :         xwait = _bt_check_unique(rel, &insertstate, heapRel, checkUnique,
     212             :                                  &is_unique, &speculativeToken);
     213             : 
     214     5222214 :         if (unlikely(TransactionIdIsValid(xwait)))
     215             :         {
     216             :             /* Have to wait for the other guy ... */
     217          24 :             _bt_relbuf(rel, insertstate.buf);
     218          24 :             insertstate.buf = InvalidBuffer;
     219             : 
     220             :             /*
     221             :              * If it's a speculative insertion, wait for it to finish (ie. to
     222             :              * go ahead with the insertion, or kill the tuple).  Otherwise
     223             :              * wait for the transaction to finish as usual.
     224             :              */
     225          24 :             if (speculativeToken)
     226           0 :                 SpeculativeInsertionWait(xwait, speculativeToken);
     227             :             else
     228          24 :                 XactLockTableWait(xwait, rel, &itup->t_tid, XLTW_InsertIndex);
     229             : 
     230             :             /* start over... */
     231          24 :             if (stack)
     232           0 :                 _bt_freestack(stack);
     233          24 :             goto search;
     234             :         }
     235             : 
     236             :         /* Uniqueness is established -- restore heap tid as scantid */
     237     5222190 :         if (itup_key->heapkeyspace)
     238     5222190 :             itup_key->scantid = &itup->t_tid;
     239             :     }
     240             : 
     241     7299832 :     if (checkUnique != UNIQUE_CHECK_EXISTING)
     242             :     {
     243             :         OffsetNumber newitemoff;
     244             : 
     245             :         /*
     246             :          * The only conflict predicate locking cares about for indexes is when
     247             :          * an index tuple insert conflicts with an existing lock.  We don't
     248             :          * know the actual page we're going to insert on for sure just yet in
     249             :          * checkingunique and !heapkeyspace cases, but it's okay to use the
     250             :          * first page the value could be on (with scantid omitted) instead.
     251             :          */
     252     7299778 :         CheckForSerializableConflictIn(rel, NULL, BufferGetBlockNumber(insertstate.buf));
     253             : 
     254             :         /*
     255             :          * Do the insertion.  Note that insertstate contains cached binary
     256             :          * search bounds established within _bt_check_unique when insertion is
     257             :          * checkingunique.
     258             :          */
     259     7299772 :         newitemoff = _bt_findinsertloc(rel, &insertstate, checkingunique,
     260             :                                        indexUnchanged, stack, heapRel);
     261     7299772 :         _bt_insertonpg(rel, heapRel, itup_key, insertstate.buf, InvalidBuffer,
     262             :                        stack, itup, insertstate.itemsz, newitemoff,
     263             :                        insertstate.postingoff, false);
     264             :     }
     265             :     else
     266             :     {
     267             :         /* just release the buffer */
     268          54 :         _bt_relbuf(rel, insertstate.buf);
     269             :     }
     270             : 
     271             :     /* be tidy */
     272     7299826 :     if (stack)
     273     6337424 :         _bt_freestack(stack);
     274     7299826 :     pfree(itup_key);
     275             : 
     276     7299826 :     return is_unique;
     277             : }
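                      : /*
                      :  * For context (sketch, not part of this file): the public entry point
                      :  * btinsert(), in nbtree.c, essentially forms the index tuple and hands
                      :  * it to _bt_doinsert().  Abridged from memory, so details may differ:
                      :  *
                      :  *     itup = index_form_tuple(RelationGetDescr(rel), values, isnull);
                      :  *     itup->t_tid = *ht_ctid;
                      :  *     result = _bt_doinsert(rel, itup, checkUnique, indexUnchanged,
                      :  *                           heapRel);
                      :  *     pfree(itup);
                      :  */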
     278             : 
     279             : /*
     280             :  *  _bt_search_insert() -- _bt_search() wrapper for inserts
     281             :  *
     282             :  * Search the tree for a particular scankey, or more precisely for the first
     283             :  * leaf page it could be on.  Try to make use of the fastpath optimization's
     284             :  * rightmost leaf page cache before actually searching the tree from the root
     285             :  * page, though.
     286             :  *
     287             :  * Return value is a stack of parent-page pointers (though see notes about
     288             :  * fastpath optimization and page splits below).  insertstate->buf is set to
     289             :  * the address of the leaf-page buffer, which is write-locked and pinned in
     290             :  * all cases (if necessary by creating a new empty root page for caller).
     291             :  *
     292             :  * The fastpath optimization avoids most of the work of searching the tree
     293             :  * repeatedly when a single backend inserts successive new tuples on the
     294             :  * rightmost leaf page of an index.  A backend cache of the rightmost leaf
     295             :  * page is maintained within _bt_insertonpg(), and used here.  The cache is
     296             :  * invalidated here when an insert of a non-pivot tuple must take place on a
     297             :  * non-rightmost leaf page.
     298             :  *
     299             :  * The optimization helps with indexes on an auto-incremented field.  It also
     300             :  * helps with indexes on datetime columns, as well as indexes with lots of
     301             :  * NULL values.  (NULLs usually get inserted in the rightmost page for single
     302             :  * column indexes, since they usually get treated as coming after everything
     303             :  * else in the key space.  Individual NULL tuples will generally be placed on
     304             :  * the rightmost leaf page due to the influence of the heap TID column.)
     305             :  *
     306             :  * Note that we avoid applying the optimization when there is insufficient
     307             :  * space on the rightmost page to fit caller's new item.  This is necessary
     308             :  * because we'll need to return a real descent stack when a page split is
     309             :  * expected (actually, caller can cope with a leaf page split that uses a NULL
     310             :  * stack, but that's very slow and so must be avoided).  Note also that the
     311             :  * fastpath optimization acquires the lock on the page conditionally as a way
     312             :  * of reducing extra contention when there are concurrent insertions into the
     313             :  * rightmost page (we give up if we'd have to wait for the lock).  We assume
     314             :  * that it isn't useful to apply the optimization when there is contention,
     315             :  * since each per-backend cache won't stay valid for long.
     316             :  */
     317             : static BTStack
     318     7300384 : _bt_search_insert(Relation rel, Relation heaprel, BTInsertState insertstate)
     319             : {
     320             :     Assert(insertstate->buf == InvalidBuffer);
     321             :     Assert(!insertstate->bounds_valid);
     322             :     Assert(insertstate->postingoff == 0);
     323             : 
     324     7300384 :     if (RelationGetTargetBlock(rel) != InvalidBlockNumber)
     325             :     {
     326             :         /* Simulate a _bt_getbuf() call with conditional locking */
     327       69352 :         insertstate->buf = ReadBuffer(rel, RelationGetTargetBlock(rel));
     328       69352 :         if (_bt_conditionallockbuf(rel, insertstate->buf))
     329             :         {
     330             :             Page        page;
     331             :             BTPageOpaque opaque;
     332             : 
     333       68932 :             _bt_checkpage(rel, insertstate->buf);
     334       68932 :             page = BufferGetPage(insertstate->buf);
     335       68932 :             opaque = BTPageGetOpaque(page);
     336             : 
     337             :             /*
     338             :              * Check if the page is still the rightmost leaf page and has
     339             :              * enough free space to accommodate the new tuple.  Also check
     340             :              * that the insertion scan key is strictly greater than the first
     341             :              * non-pivot tuple on the page.  (Note that we expect itup_key's
     342             :              * scantid to be unset when our caller is a checkingunique
     343             :              * inserter.)
     344             :              */
     345       68932 :             if (P_RIGHTMOST(opaque) &&
     346       68850 :                 P_ISLEAF(opaque) &&
     347       68850 :                 !P_IGNORE(opaque) &&
     348      137292 :                 PageGetFreeSpace(page) > insertstate->itemsz &&
     349      136884 :                 PageGetMaxOffsetNumber(page) >= P_HIKEY &&
     350       68442 :                 _bt_compare(rel, insertstate->itup_key, page, P_HIKEY) > 0)
     351             :             {
     352             :                 /*
     353             :                  * Caller can use the fastpath optimization because cached
     354             :                  * block is still rightmost leaf page, which can fit caller's
     355             :                  * new tuple without splitting.  Keep block in local cache for
     356             :                  * next insert, and have caller use NULL stack.
     357             :                  *
     358             :                  * Note that _bt_insert_parent() has an assertion that catches
     359             :                  * leaf page splits that somehow follow from a fastpath insert
     360             :                  * (it should only be passed a NULL stack when it must deal
     361             :                  * with a concurrent root page split, and never because a NULL
     362             :                  * stack was returned here).
     363             :                  */
     364       68396 :                 return NULL;
     365             :             }
     366             : 
     367             :             /* Page unsuitable for caller, drop lock and pin */
     368         536 :             _bt_relbuf(rel, insertstate->buf);
     369             :         }
     370             :         else
     371             :         {
     372             :             /* Lock unavailable, drop pin */
     373         420 :             ReleaseBuffer(insertstate->buf);
     374             :         }
     375             : 
     376             :         /* Forget block, since cache doesn't appear to be useful */
     377         956 :         RelationSetTargetBlock(rel, InvalidBlockNumber);
     378             :     }
     379             : 
     380             :     /* Cannot use optimization -- descend tree, return proper descent stack */
     381     7231988 :     return _bt_search(rel, heaprel, insertstate->itup_key, &insertstate->buf,
     382             :                       BT_WRITE);
     383             : }
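                      : /*
                      :  * A minimal standalone sketch (hypothetical names and types, not
                      :  * PostgreSQL APIs) of the same "optimistic cache + conditional lock +
                      :  * revalidate, else fall back" pattern used by _bt_search_insert():
                      :  */
                      : #include <stdbool.h>
                      : #include <pthread.h>
                      : 
                      : typedef struct CachedTarget
                      : {
                      :     pthread_mutex_t lock;       /* stands in for the buffer lock */
                      :     bool        still_valid;    /* stands in for the rightmost/space checks */
                      : } CachedTarget;
                      : 
                      : /* Returns true only if the cached target was locked and revalidated */
                      : static bool
                      : try_fastpath(CachedTarget *t)
                      : {
                      :     /* Conditional acquire: give up rather than wait, as above */
                      :     if (pthread_mutex_trylock(&t->lock) != 0)
                      :         return false;           /* contended, take the slow path */
                      : 
                      :     if (!t->still_valid)
                      :     {
                      :         pthread_mutex_unlock(&t->lock);
                      :         return false;           /* unsuitable, take the slow path */
                      :     }
                      : 
                      :     return true;                /* caller proceeds, then unlocks */
                      : }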
     384             : 
     385             : /*
     386             :  *  _bt_check_unique() -- Check for violation of unique index constraint
     387             :  *
     388             :  * Returns InvalidTransactionId if there is no conflict, else an xact ID
      389             :  * we must wait for, to see if it commits a conflicting tuple.  If an actual
     390             :  * conflict is detected, no return --- just ereport().  If an xact ID is
     391             :  * returned, and the conflicting tuple still has a speculative insertion in
     392             :  * progress, *speculativeToken is set to non-zero, and the caller can wait for
     393             :  * the verdict on the insertion using SpeculativeInsertionWait().
     394             :  *
     395             :  * However, if checkUnique == UNIQUE_CHECK_PARTIAL, we always return
     396             :  * InvalidTransactionId because we don't want to wait.  In this case we
     397             :  * set *is_unique to false if there is a potential conflict, and the
     398             :  * core code must redo the uniqueness check later.
     399             :  *
     400             :  * As a side-effect, sets state in insertstate that can later be used by
     401             :  * _bt_findinsertloc() to reuse most of the binary search work we do
     402             :  * here.
     403             :  *
     404             :  * This code treats NULLs as equal, unlike the default semantics for unique
     405             :  * indexes.  So do not call here when there are NULL values in scan key and
     406             :  * the index uses the default NULLS DISTINCT mode.
     407             :  */
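                      : /*
                      :  * Outcome summary (explanatory note, not part of the original file):
                      :  *
                      :  *   no live duplicate found          -> InvalidTransactionId returned
                      :  *   duplicate insert still in flight -> conflicting xact ID (caller
                      :  *                                       waits, then searches again)
                      :  *   duplicate committed and live     -> ereport(ERROR), no return
                      :  *   UNIQUE_CHECK_PARTIAL duplicate   -> InvalidTransactionId, with
                      :  *                                       *is_unique set to false
                      :  */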
     408             : static TransactionId
     409     5222742 : _bt_check_unique(Relation rel, BTInsertState insertstate, Relation heapRel,
     410             :                  IndexUniqueCheck checkUnique, bool *is_unique,
     411             :                  uint32 *speculativeToken)
     412             : {
     413     5222742 :     IndexTuple  itup = insertstate->itup;
     414     5222742 :     IndexTuple  curitup = NULL;
     415     5222742 :     ItemId      curitemid = NULL;
     416     5222742 :     BTScanInsert itup_key = insertstate->itup_key;
     417             :     SnapshotData SnapshotDirty;
     418             :     OffsetNumber offset;
     419             :     OffsetNumber maxoff;
     420             :     Page        page;
     421             :     BTPageOpaque opaque;
     422     5222742 :     Buffer      nbuf = InvalidBuffer;
     423     5222742 :     bool        found = false;
     424     5222742 :     bool        inposting = false;
     425     5222742 :     bool        prevalldead = true;
     426     5222742 :     int         curposti = 0;
     427             : 
     428             :     /* Assume unique until we find a duplicate */
     429     5222742 :     *is_unique = true;
     430             : 
     431     5222742 :     InitDirtySnapshot(SnapshotDirty);
     432             : 
     433     5222742 :     page = BufferGetPage(insertstate->buf);
     434     5222742 :     opaque = BTPageGetOpaque(page);
     435     5222742 :     maxoff = PageGetMaxOffsetNumber(page);
     436             : 
     437             :     /*
     438             :      * Find the first tuple with the same key.
     439             :      *
     440             :      * This also saves the binary search bounds in insertstate.  We use them
     441             :      * in the fastpath below, but also in the _bt_findinsertloc() call later.
     442             :      */
     443             :     Assert(!insertstate->bounds_valid);
     444     5222742 :     offset = _bt_binsrch_insert(rel, insertstate);
     445             : 
     446             :     /*
     447             :      * Scan over all equal tuples, looking for live conflicts.
     448             :      */
     449             :     Assert(!insertstate->bounds_valid || insertstate->low == offset);
     450             :     Assert(!itup_key->anynullkeys);
     451             :     Assert(itup_key->scantid == NULL);
     452             :     for (;;)
     453             :     {
     454             :         /*
     455             :          * Each iteration of the loop processes one heap TID, not one index
     456             :          * tuple.  Current offset number for page isn't usually advanced on
     457             :          * iterations that process heap TIDs from posting list tuples.
     458             :          *
     459             :          * "inposting" state is set when _inside_ a posting list --- not when
     460             :          * we're at the start (or end) of a posting list.  We advance curposti
     461             :          * at the end of the iteration when inside a posting list tuple.  In
     462             :          * general, every loop iteration either advances the page offset or
     463             :          * advances curposti --- an iteration that handles the rightmost/max
     464             :          * heap TID in a posting list finally advances the page offset (and
     465             :          * unsets "inposting").
     466             :          *
     467             :          * Make sure the offset points to an actual index tuple before trying
     468             :          * to examine it...
     469             :          */
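                      :         /*
                      :          * Walk-through (explanatory note, not part of the original
                      :          * file): a posting list tuple with three TIDs at offset 5 is
                      :          * visited as (offset=5, curposti=0), then (5, 1), then (5, 2);
                      :          * only the iteration for that last TID advances the loop to
                      :          * offset 6 and unsets "inposting".
                      :          */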
     470    17221376 :         if (offset <= maxoff)
     471             :         {
     472             :             /*
     473             :              * Fastpath: In most cases, we can use cached search bounds to
     474             :              * limit our consideration to items that are definitely
     475             :              * duplicates.  This fastpath doesn't apply when the original page
     476             :              * is empty, or when initial offset is past the end of the
     477             :              * original page, which may indicate that we need to examine a
     478             :              * second or subsequent page.
     479             :              *
     480             :              * Note that this optimization allows us to avoid calling
     481             :              * _bt_compare() directly when there are no duplicates, as long as
     482             :              * the offset where the key will go is not at the end of the page.
     483             :              */
     484    14295606 :             if (nbuf == InvalidBuffer && offset == insertstate->stricthigh)
     485             :             {
     486             :                 Assert(insertstate->bounds_valid);
     487             :                 Assert(insertstate->low >= P_FIRSTDATAKEY(opaque));
     488             :                 Assert(insertstate->low <= insertstate->stricthigh);
     489             :                 Assert(_bt_compare(rel, itup_key, page, offset) < 0);
     490     2064076 :                 break;
     491             :             }
     492             : 
     493             :             /*
     494             :              * We can skip items that are already marked killed.
     495             :              *
     496             :              * In the presence of heavy update activity an index may contain
     497             :              * many killed items with the same key; running _bt_compare() on
     498             :              * each killed item gets expensive.  Just advance over killed
     499             :              * items as quickly as we can.  We only apply _bt_compare() when
     500             :              * we get to a non-killed item.  We could reuse the bounds to
     501             :              * avoid _bt_compare() calls for known equal tuples, but it
     502             :              * doesn't seem worth it.
     503             :              */
     504    12231530 :             if (!inposting)
     505     7714164 :                 curitemid = PageGetItemId(page, offset);
     506    12231530 :             if (inposting || !ItemIdIsDead(curitemid))
     507             :             {
     508             :                 ItemPointerData htid;
     509    11672396 :                 bool        all_dead = false;
     510             : 
     511    11672396 :                 if (!inposting)
     512             :                 {
     513             :                     /* Plain tuple, or first TID in posting list tuple */
     514     7155030 :                     if (_bt_compare(rel, itup_key, page, offset) != 0)
     515      206942 :                         break;  /* we're past all the equal tuples */
     516             : 
      517             :                     /* Advance curitup */
     518     6948088 :                     curitup = (IndexTuple) PageGetItem(page, curitemid);
     519             :                     Assert(!BTreeTupleIsPivot(curitup));
     520             :                 }
     521             : 
     522             :                 /* okay, we gotta fetch the heap tuple using htid ... */
     523    11465454 :                 if (!BTreeTupleIsPosting(curitup))
     524             :                 {
     525             :                     /* ... htid is from simple non-pivot tuple */
     526             :                     Assert(!inposting);
     527     6901524 :                     htid = curitup->t_tid;
     528             :                 }
     529     4563930 :                 else if (!inposting)
     530             :                 {
     531             :                     /* ... htid is first TID in new posting list */
     532       46564 :                     inposting = true;
     533       46564 :                     prevalldead = true;
     534       46564 :                     curposti = 0;
     535       46564 :                     htid = *BTreeTupleGetPostingN(curitup, 0);
     536             :                 }
     537             :                 else
     538             :                 {
     539             :                     /* ... htid is second or subsequent TID in posting list */
     540             :                     Assert(curposti > 0);
     541     4517366 :                     htid = *BTreeTupleGetPostingN(curitup, curposti);
     542             :                 }
     543             : 
     544             :                 /*
     545             :                  * If we are doing a recheck, we expect to find the tuple we
     546             :                  * are rechecking.  It's not a duplicate, but we have to keep
     547             :                  * scanning.
     548             :                  */
     549    11465690 :                 if (checkUnique == UNIQUE_CHECK_EXISTING &&
     550         236 :                     ItemPointerCompare(&htid, &itup->t_tid) == 0)
     551             :                 {
     552          54 :                     found = true;
     553             :                 }
     554             : 
     555             :                 /*
      556             :                  * Check if there are any table tuples for this index entry
     557             :                  * satisfying SnapshotDirty. This is necessary because for AMs
     558             :                  * with optimizations like heap's HOT, we have just a single
     559             :                  * index entry for the entire chain.
     560             :                  */
     561    11465400 :                 else if (table_index_fetch_tuple_check(heapRel, &htid,
     562             :                                                        &SnapshotDirty,
     563             :                                                        &all_dead))
     564             :                 {
     565             :                     TransactionId xwait;
     566             : 
     567             :                     /*
     568             :                      * It is a duplicate. If we are only doing a partial
     569             :                      * check, then don't bother checking if the tuple is being
     570             :                      * updated in another transaction. Just return the fact
     571             :                      * that it is a potential conflict and leave the full
     572             :                      * check till later. Don't invalidate binary search
     573             :                      * bounds.
     574             :                      */
     575         748 :                     if (checkUnique == UNIQUE_CHECK_PARTIAL)
     576             :                     {
     577         196 :                         if (nbuf != InvalidBuffer)
     578           0 :                             _bt_relbuf(rel, nbuf);
     579         196 :                         *is_unique = false;
     580         220 :                         return InvalidTransactionId;
     581             :                     }
     582             : 
     583             :                     /*
     584             :                      * If this tuple is being updated by other transaction
     585             :                      * then we have to wait for its commit/abort.
     586             :                      */
     587        1104 :                     xwait = (TransactionIdIsValid(SnapshotDirty.xmin)) ?
     588         552 :                         SnapshotDirty.xmin : SnapshotDirty.xmax;
     589             : 
     590         552 :                     if (TransactionIdIsValid(xwait))
     591             :                     {
     592          24 :                         if (nbuf != InvalidBuffer)
     593           0 :                             _bt_relbuf(rel, nbuf);
     594             :                         /* Tell _bt_doinsert to wait... */
     595          24 :                         *speculativeToken = SnapshotDirty.speculativeToken;
     596             :                         /* Caller releases lock on buf immediately */
     597          24 :                         insertstate->bounds_valid = false;
     598          24 :                         return xwait;
     599             :                     }
     600             : 
     601             :                     /*
     602             :                      * Otherwise we have a definite conflict.  But before
     603             :                      * complaining, look to see if the tuple we want to insert
     604             :                      * is itself now committed dead --- if so, don't complain.
     605             :                      * This is a waste of time in normal scenarios but we must
     606             :                      * do it to support CREATE INDEX CONCURRENTLY.
     607             :                      *
     608             :                      * We must follow HOT-chains here because during
     609             :                      * concurrent index build, we insert the root TID though
     610             :                      * the actual tuple may be somewhere in the HOT-chain.
     611             :                      * While following the chain we might not stop at the
     612             :                      * exact tuple which triggered the insert, but that's OK
     613             :                      * because if we find a live tuple anywhere in this chain,
     614             :                      * we have a unique key conflict.  The other live tuple is
     615             :                      * not part of this chain because it had a different index
     616             :                      * entry.
     617             :                      */
     618         528 :                     htid = itup->t_tid;
     619         528 :                     if (table_index_fetch_tuple_check(heapRel, &htid,
     620             :                                                       SnapshotSelf, NULL))
     621             :                     {
     622             :                         /* Normal case --- it's still live */
     623             :                     }
     624             :                     else
     625             :                     {
     626             :                         /*
     627             :                          * It's been deleted, so no error, and no need to
     628             :                          * continue searching
     629             :                          */
     630           0 :                         break;
     631             :                     }
     632             : 
     633             :                     /*
     634             :                      * Check for a conflict-in as we would if we were going to
     635             :                      * write to this page.  We aren't actually going to write,
     636             :                      * but we want a chance to report SSI conflicts that would
     637             :                      * otherwise be masked by this unique constraint
     638             :                      * violation.
     639             :                      */
     640         528 :                     CheckForSerializableConflictIn(rel, NULL, BufferGetBlockNumber(insertstate->buf));
     641             : 
     642             :                     /*
     643             :                      * This is a definite conflict.  Break the tuple down into
     644             :                      * datums and report the error.  But first, make sure we
     645             :                      * release the buffer locks we're holding ---
     646             :                      * BuildIndexValueDescription could make catalog accesses,
     647             :                      * which in the worst case might touch this same index and
     648             :                      * cause deadlocks.
     649             :                      */
     650         520 :                     if (nbuf != InvalidBuffer)
     651           0 :                         _bt_relbuf(rel, nbuf);
     652         520 :                     _bt_relbuf(rel, insertstate->buf);
     653         520 :                     insertstate->buf = InvalidBuffer;
     654         520 :                     insertstate->bounds_valid = false;
     655             : 
     656             :                     {
     657             :                         Datum       values[INDEX_MAX_KEYS];
     658             :                         bool        isnull[INDEX_MAX_KEYS];
     659             :                         char       *key_desc;
     660             : 
     661         520 :                         index_deform_tuple(itup, RelationGetDescr(rel),
     662             :                                            values, isnull);
     663             : 
     664         520 :                         key_desc = BuildIndexValueDescription(rel, values,
     665             :                                                               isnull);
     666             : 
     667         520 :                         ereport(ERROR,
     668             :                                 (errcode(ERRCODE_UNIQUE_VIOLATION),
     669             :                                  errmsg("duplicate key value violates unique constraint \"%s\"",
     670             :                                         RelationGetRelationName(rel)),
     671             :                                  key_desc ? errdetail("Key %s already exists.",
     672             :                                                       key_desc) : 0,
     673             :                                  errtableconstraint(heapRel,
     674             :                                                     RelationGetRelationName(rel))));
     675             :                     }
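                      :                     /*
                      :                      * As seen from a client session, the resulting
                      :                      * error looks like this (illustrative output,
                      :                      * with a hypothetical constraint name and key):
                      :                      *
                      :                      *   ERROR:  duplicate key value violates unique
                      :                      *           constraint "foo_pkey"
                      :                      *   DETAIL:  Key (id)=(1) already exists.
                      :                      */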
     676             :                 }
     677    11464652 :                 else if (all_dead && (!inposting ||
     678       37950 :                                       (prevalldead &&
     679       37950 :                                        curposti == BTreeTupleGetNPosting(curitup) - 1)))
     680             :                 {
     681             :                     /*
     682             :                      * The conflicting tuple (or all HOT chains pointed to by
     683             :                      * all posting list TIDs) is dead to everyone, so mark the
     684             :                      * index entry killed.
     685             :                      */
     686      106262 :                     ItemIdMarkDead(curitemid);
     687      106262 :                     opaque->btpo_flags |= BTP_HAS_GARBAGE;
     688             : 
     689             :                     /*
     690             :                      * Mark buffer with a dirty hint, since state is not
     691             :                      * crucial. Be sure to mark the proper buffer dirty.
     692             :                      */
     693      106262 :                     if (nbuf != InvalidBuffer)
     694           4 :                         MarkBufferDirtyHint(nbuf, true);
     695             :                     else
     696      106258 :                         MarkBufferDirtyHint(insertstate->buf, true);
     697             :                 }
     698             : 
     699             :                 /*
     700             :                  * Remember if posting list tuple has even a single HOT chain
     701             :                  * whose members are not all dead
     702             :                  */
     703    11464706 :                 if (!all_dead && inposting)
     704     4525910 :                     prevalldead = false;
     705             :             }
     706             :         }
     707             : 
     708    14949610 :         if (inposting && curposti < BTreeTupleGetNPosting(curitup) - 1)
     709             :         {
     710             :             /* Advance to next TID in same posting list */
     711     4517366 :             curposti++;
     712     4517366 :             continue;
     713             :         }
     714    10432244 :         else if (offset < maxoff)
     715             :         {
     716             :             /* Advance to next tuple */
     717     7471224 :             curposti = 0;
     718     7471224 :             inposting = false;
     719     7471224 :             offset = OffsetNumberNext(offset);
     720             :         }
     721             :         else
     722             :         {
     723             :             int         highkeycmp;
     724             : 
     725             :             /* If scankey == hikey we gotta check the next page too */
     726     2961020 :             if (P_RIGHTMOST(opaque))
     727     2818402 :                 break;
     728      142618 :             highkeycmp = _bt_compare(rel, itup_key, page, P_HIKEY);
     729             :             Assert(highkeycmp <= 0);
     730      142618 :             if (highkeycmp != 0)
     731      132574 :                 break;
     732             :             /* Advance to next non-dead page --- there must be one */
     733             :             for (;;)
     734           0 :             {
     735       10044 :                 BlockNumber nblkno = opaque->btpo_next;
     736             : 
     737       10044 :                 nbuf = _bt_relandgetbuf(rel, nbuf, nblkno, BT_READ);
     738       10044 :                 page = BufferGetPage(nbuf);
     739       10044 :                 opaque = BTPageGetOpaque(page);
     740       10044 :                 if (!P_IGNORE(opaque))
     741       10044 :                     break;
     742           0 :                 if (P_RIGHTMOST(opaque))
     743           0 :                     elog(ERROR, "fell off the end of index \"%s\"",
     744             :                          RelationGetRelationName(rel));
     745             :             }
     746             :             /* Will also advance to next tuple */
     747       10044 :             curposti = 0;
     748       10044 :             inposting = false;
     749       10044 :             maxoff = PageGetMaxOffsetNumber(page);
     750       10044 :             offset = P_FIRSTDATAKEY(opaque);
     751             :             /* Don't invalidate binary search bounds */
     752             :         }
     753             :     }
     754             : 
     755             :     /*
     756             :      * If we are doing a recheck then we should have found the tuple we are
     757             :      * checking.  Otherwise there's something very wrong --- probably, the
     758             :      * index is on a non-immutable expression.
     759             :      */
     760     5221994 :     if (checkUnique == UNIQUE_CHECK_EXISTING && !found)
     761           0 :         ereport(ERROR,
     762             :                 (errcode(ERRCODE_INTERNAL_ERROR),
     763             :                  errmsg("failed to re-find tuple within index \"%s\"",
     764             :                         RelationGetRelationName(rel)),
     765             :                  errhint("This may be because of a non-immutable index expression."),
     766             :                  errtableconstraint(heapRel,
     767             :                                     RelationGetRelationName(rel))));
     768             : 
     769     5221994 :     if (nbuf != InvalidBuffer)
     770        5760 :         _bt_relbuf(rel, nbuf);
     771             : 
     772     5221994 :     return InvalidTransactionId;
     773             : }
     774             : 
     775             : 
     776             : /*
     777             :  *  _bt_findinsertloc() -- Finds an insert location for a tuple
     778             :  *
     779             :  *      On entry, insertstate buffer contains the page the new tuple belongs
     780             :  *      on.  It is exclusive-locked and pinned by the caller.
     781             :  *
     782             :  *      If 'checkingunique' is true, the buffer on entry is the first page
     783             :  *      that contains duplicates of the new key.  If there are duplicates on
     784             :  *      multiple pages, the correct insertion position might be some page to
     785             :  *      the right, rather than the first page.  In that case, this function
     786             :  *      moves right to the correct target page.
     787             :  *
     788             :  *      (In a !heapkeyspace index, there can be multiple pages with the same
      789             :  *      high key, on which the new tuple could legitimately be placed.  In
     790             :  *      that case, the caller passes the first page containing duplicates,
     791             :  *      just like when checkingunique=true.  If that page doesn't have enough
     792             :  *      room for the new tuple, this function moves right, trying to find a
     793             :  *      legal page that does.)
     794             :  *
     795             :  *      If 'indexUnchanged' is true, this is for an UPDATE that didn't
     796             :  *      logically change the indexed value, but must nevertheless have a new
     797             :  *      entry to point to a successor version.  This hint from the executor
     798             :  *      will influence our behavior when the page might have to be split and
     799             :  *      we must consider our options.  Bottom-up index deletion can avoid
     800             :  *      pathological version-driven page splits, but we only want to go to the
     801             :  *      trouble of trying it when we already have moderate confidence that
     802             :  *      it's appropriate.  The hint should not significantly affect our
      803             :  *      behavior over time unless practically all inserts onto the leaf page
     804             :  *      get the hint.
     805             :  *
     806             :  *      On exit, insertstate buffer contains the chosen insertion page, and
     807             :  *      the offset within that page is returned.  If _bt_findinsertloc needed
     808             :  *      to move right, the lock and pin on the original page are released, and
     809             :  *      the new buffer is exclusively locked and pinned instead.
     810             :  *
     811             :  *      If insertstate contains cached binary search bounds, we will take
     812             :  *      advantage of them.  This avoids repeating comparisons that we made in
     813             :  *      _bt_check_unique() already.
     814             :  */
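                      : /*
                      :  * Example scenario (explanatory note, not part of the original file):
                      :  * a checkingunique insert of key 42 initially lands on the first leaf
                      :  * page whose key space could contain 42, since scantid was omitted
                      :  * during the descent.  If that page holds only (42, TID) duplicates
                      :  * that all sort before the new tuple's heap TID, the loop below keeps
                      :  * stepping right for as long as the new tuple (scantid now set) still
                      :  * sorts after the page's high key.
                      :  */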
     815             : static OffsetNumber
     816     7299772 : _bt_findinsertloc(Relation rel,
     817             :                   BTInsertState insertstate,
     818             :                   bool checkingunique,
     819             :                   bool indexUnchanged,
     820             :                   BTStack stack,
     821             :                   Relation heapRel)
     822             : {
     823     7299772 :     BTScanInsert itup_key = insertstate->itup_key;
     824     7299772 :     Page        page = BufferGetPage(insertstate->buf);
     825             :     BTPageOpaque opaque;
     826             :     OffsetNumber newitemoff;
     827             : 
     828     7299772 :     opaque = BTPageGetOpaque(page);
     829             : 
     830             :     /* Check 1/3 of a page restriction */
     831     7299772 :     if (unlikely(insertstate->itemsz > BTMaxItemSize))
     832           0 :         _bt_check_third_page(rel, heapRel, itup_key->heapkeyspace, page,
     833             :                              insertstate->itup);
     834             : 
     835             :     Assert(P_ISLEAF(opaque) && !P_INCOMPLETE_SPLIT(opaque));
     836             :     Assert(!insertstate->bounds_valid || checkingunique);
     837             :     Assert(!itup_key->heapkeyspace || itup_key->scantid != NULL);
     838             :     Assert(itup_key->heapkeyspace || itup_key->scantid == NULL);
     839             :     Assert(!itup_key->allequalimage || itup_key->heapkeyspace);
     840             : 
     841     7299772 :     if (itup_key->heapkeyspace)
     842             :     {
     843             :         /* Keep track of whether checkingunique duplicate seen */
     844     7299772 :         bool        uniquedup = indexUnchanged;
     845             : 
     846             :         /*
     847             :          * If we're inserting into a unique index, we may have to walk right
      848             :          * through leaf pages to find the one leaf page onto which we must
      849             :          * insert.
     850             :          *
     851             :          * This is needed for checkingunique callers because a scantid was not
     852             :          * used when we called _bt_search().  scantid can only be set after
     853             :          * _bt_check_unique() has checked for duplicates.  The buffer
     854             :          * initially stored in insertstate->buf has the page where the first
      855             :          * duplicate key might be found, which isn't always the page that the
      856             :          * new tuple belongs on.  The new tuple's heap TID attribute (scantid)
     857             :          * could force us to insert on a sibling page, though that should be
     858             :          * very rare in practice.
     859             :          */
     860     7299772 :         if (checkingunique)
     861             :         {
     862     5222130 :             if (insertstate->low < insertstate->stricthigh)
     863             :             {
     864             :                 /* Encountered a duplicate in _bt_check_unique() */
     865             :                 Assert(insertstate->bounds_valid);
     866      446686 :                 uniquedup = true;
     867             :             }
     868             : 
     869             :             for (;;)
     870             :             {
     871             :                 /*
     872             :                  * Does the new tuple belong on this page?
     873             :                  *
     874             :                  * The earlier _bt_check_unique() call may well have
     875             :                  * established a strict upper bound on the offset for the new
     876             :                  * item.  If it's not the last item of the page (i.e. if there
     877             :                  * is at least one tuple on the page that goes after the tuple
     878             :                  * we're inserting) then we know that the tuple belongs on
     879             :                  * this page.  We can skip the high key check.
     880             :                  */
     881     5232174 :                 if (insertstate->bounds_valid &&
     882    10422512 :                     insertstate->low <= insertstate->stricthigh &&
     883     5211256 :                     insertstate->stricthigh <= PageGetMaxOffsetNumber(page))
     884     2244744 :                     break;
     885             : 
     886             :                 /* Test '<=', not '!=', since scantid is set now */
     887     3145208 :                 if (P_RIGHTMOST(opaque) ||
     888      157778 :                     _bt_compare(rel, itup_key, page, P_HIKEY) <= 0)
     889             :                     break;
     890             : 
     891       10044 :                 _bt_stepright(rel, heapRel, insertstate, stack);
     892             :                 /* Update local state after stepping right */
     893       10044 :                 page = BufferGetPage(insertstate->buf);
     894       10044 :                 opaque = BTPageGetOpaque(page);
     895             :                 /* Assume duplicates (if checkingunique) */
     896       10044 :                 uniquedup = true;
     897             :             }
     898             :         }
     899             : 
     900             :         /*
     901             :          * If the target page cannot fit newitem, try to avoid splitting the
     902             :          * page on insert by performing deletion or deduplication now
     903             :          */
     904     7299772 :         if (PageGetFreeSpace(page) < insertstate->itemsz)
     905       51288 :             _bt_delete_or_dedup_one_page(rel, heapRel, insertstate, false,
     906             :                                          checkingunique, uniquedup,
     907             :                                          indexUnchanged);
     908             :     }
     909             :     else
     910             :     {
     911             :         /*----------
     912             :          * This is a !heapkeyspace (version 2 or 3) index.  The current page
     913             :          * is the first page that we could insert the new tuple to, but there
     914             :          * may be other pages to the right that we could opt to use instead.
     915             :          *
     916             :          * If the new key is equal to one or more existing keys, we can
     917             :          * legitimately place it anywhere in the series of equal keys.  In
     918             :          * fact, if the new key is equal to the page's "high key" we can place
     919             :          * it on the next page.  If it is equal to the high key, and there's
     920             :          * not room to insert the new tuple on the current page without
     921             :          * splitting, then we move right hoping to find more free space and
     922             :          * avoid a split.
     923             :          *
     924             :          * Keep scanning right until we
     925             :          *      (a) find a page with enough free space,
     926             :          *      (b) reach the last page where the tuple can legally go, or
     927             :          *      (c) get tired of searching.
     928             :          * (c) is not flippant; it is important because if there are many
     929             :          * pages' worth of equal keys, it's better to split one of the early
     930             :          * pages than to scan all the way to the end of the run of equal keys
     931             :          * on every insert.  We implement "get tired" as a random choice,
     932             :          * since stopping after scanning a fixed number of pages wouldn't work
     933             :          * well (we'd never reach the right-hand side of previously split
     934             :          * pages).  The probability of moving right is set at 0.99, which may
     935             :          * seem too high to change the behavior much, but it does an excellent
      936             :          * job of preventing O(N^2) behavior with many equal keys.  (A
                      :          * standalone simulation of this stop rule follows the function.)
     937             :          *----------
     938             :          */
     939           0 :         while (PageGetFreeSpace(page) < insertstate->itemsz)
     940             :         {
     941             :             /*
     942             :              * Before considering moving right, see if we can obtain enough
     943             :              * space by erasing LP_DEAD items
     944             :              */
     945           0 :             if (P_HAS_GARBAGE(opaque))
     946             :             {
     947             :                 /* Perform simple deletion */
     948           0 :                 _bt_delete_or_dedup_one_page(rel, heapRel, insertstate, true,
     949             :                                              false, false, false);
     950             : 
     951           0 :                 if (PageGetFreeSpace(page) >= insertstate->itemsz)
     952           0 :                     break;      /* OK, now we have enough space */
     953             :             }
     954             : 
     955             :             /*
     956             :              * Nope, so check conditions (b) and (c) enumerated above
     957             :              *
     958             :              * The earlier _bt_check_unique() call may well have established a
     959             :              * strict upper bound on the offset for the new item.  If it's not
     960             :              * the last item of the page (i.e. if there is at least one tuple
      961             :              * on the page that's greater than the tuple we're inserting)
     962             :              * then we know that the tuple belongs on this page.  We can skip
     963             :              * the high key check.
     964             :              */
     965           0 :             if (insertstate->bounds_valid &&
     966           0 :                 insertstate->low <= insertstate->stricthigh &&
     967           0 :                 insertstate->stricthigh <= PageGetMaxOffsetNumber(page))
     968           0 :                 break;
     969             : 
     970           0 :             if (P_RIGHTMOST(opaque) ||
     971           0 :                 _bt_compare(rel, itup_key, page, P_HIKEY) != 0 ||
     972           0 :                 pg_prng_uint32(&pg_global_prng_state) <= (PG_UINT32_MAX / 100))
     973             :                 break;
     974             : 
     975           0 :             _bt_stepright(rel, heapRel, insertstate, stack);
     976             :             /* Update local state after stepping right */
     977           0 :             page = BufferGetPage(insertstate->buf);
     978           0 :             opaque = BTPageGetOpaque(page);
     979             :         }
     980             :     }
     981             : 
     982             :     /*
     983             :      * We should now be on the correct page.  Find the offset within the page
     984             :      * for the new tuple. (Possibly reusing earlier search bounds.)
     985             :      */
     986             :     Assert(P_RIGHTMOST(opaque) ||
     987             :            _bt_compare(rel, itup_key, page, P_HIKEY) <= 0);
     988             : 
     989     7299772 :     newitemoff = _bt_binsrch_insert(rel, insertstate);
     990             : 
     991     7299772 :     if (insertstate->postingoff == -1)
     992             :     {
     993             :         /*
     994             :          * There is an overlapping posting list tuple with its LP_DEAD bit
     995             :          * set.  We don't want to unnecessarily unset its LP_DEAD bit while
     996             :          * performing a posting list split, so perform simple index tuple
     997             :          * deletion early.
     998             :          */
     999           2 :         _bt_delete_or_dedup_one_page(rel, heapRel, insertstate, true,
    1000             :                                      false, false, false);
    1001             : 
    1002             :         /*
    1003             :          * Do new binary search.  New insert location cannot overlap with any
    1004             :          * posting list now.
    1005             :          */
    1006             :         Assert(!insertstate->bounds_valid);
    1007           2 :         insertstate->postingoff = 0;
    1008           2 :         newitemoff = _bt_binsrch_insert(rel, insertstate);
    1009             :         Assert(insertstate->postingoff == 0);
    1010             :     }
    1011             : 
    1012     7299772 :     return newitemoff;
    1013             : }
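                      : 
                      : /*
                      :  * Illustrative sketch (not part of nbtinsert.c): the "get tired" rule
                      :  * used above for !heapkeyspace indexes is a geometric distribution.
                      :  * With P(move right) = 0.99, the expected number of pages visited in a
                      :  * long run of equal keys is 1/0.01 = 100, no matter how long the run
                      :  * is.  A minimal standalone simulation, with rand() standing in for
                      :  * pg_prng_uint32():
                      :  */
                      : #include <stdio.h>
                      : #include <stdlib.h>
                      : 
                      : int
                      : main(void)
                      : {
                      :     const int   trials = 100000;
                      :     long        moves = 0;
                      : 
                      :     srand(12345);
                      :     for (int i = 0; i < trials; i++)
                      :     {
                      :         /* keep "moving right" while the 99% coin comes up heads */
                      :         while (rand() > RAND_MAX / 100)
                      :             moves++;
                      :     }
                      :     /* prints ~99.0: about one hundred pages visited per insertion */
                      :     printf("average moves right: %.1f\n", (double) moves / trials);
                      :     return 0;
                      : }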
    1014             : 
    1015             : /*
    1016             :  * Step right to next non-dead page, during insertion.
    1017             :  *
    1018             :  * This is a bit more complicated than moving right in a search.  We must
    1019             :  * write-lock the target page before releasing write lock on current page;
    1020             :  * else someone else's _bt_check_unique scan could fail to see our insertion.
    1021             :  * Write locks on intermediate dead pages won't do because we don't know when
    1022             :  * they will get de-linked from the tree.
    1023             :  *
    1024             :  * This is more aggressive than it needs to be for non-unique !heapkeyspace
     1025             :  * indexes.  (A standalone lock-coupling analog follows this function.)
    1026             :  */
    1027             : static void
    1028       10044 : _bt_stepright(Relation rel, Relation heaprel, BTInsertState insertstate,
    1029             :               BTStack stack)
    1030             : {
    1031             :     Page        page;
    1032             :     BTPageOpaque opaque;
    1033             :     Buffer      rbuf;
    1034             :     BlockNumber rblkno;
    1035             : 
    1036             :     Assert(heaprel != NULL);
    1037       10044 :     page = BufferGetPage(insertstate->buf);
    1038       10044 :     opaque = BTPageGetOpaque(page);
    1039             : 
    1040       10044 :     rbuf = InvalidBuffer;
    1041       10044 :     rblkno = opaque->btpo_next;
    1042             :     for (;;)
    1043             :     {
    1044       10044 :         rbuf = _bt_relandgetbuf(rel, rbuf, rblkno, BT_WRITE);
    1045       10044 :         page = BufferGetPage(rbuf);
    1046       10044 :         opaque = BTPageGetOpaque(page);
    1047             : 
    1048             :         /*
    1049             :          * If this page was incompletely split, finish the split now.  We do
    1050             :          * this while holding a lock on the left sibling, which is not good
    1051             :          * because finishing the split could be a fairly lengthy operation.
    1052             :          * But this should happen very seldom.
    1053             :          */
    1054       10044 :         if (P_INCOMPLETE_SPLIT(opaque))
    1055             :         {
    1056           0 :             _bt_finish_split(rel, heaprel, rbuf, stack);
    1057           0 :             rbuf = InvalidBuffer;
    1058           0 :             continue;
    1059             :         }
    1060             : 
    1061       10044 :         if (!P_IGNORE(opaque))
    1062       10044 :             break;
    1063           0 :         if (P_RIGHTMOST(opaque))
    1064           0 :             elog(ERROR, "fell off the end of index \"%s\"",
    1065             :                  RelationGetRelationName(rel));
    1066             : 
    1067           0 :         rblkno = opaque->btpo_next;
    1068             :     }
    1069             :     /* rbuf locked; unlock buf, update state for caller */
    1070       10044 :     _bt_relbuf(rel, insertstate->buf);
    1071       10044 :     insertstate->buf = rbuf;
    1072       10044 :     insertstate->bounds_valid = false;
    1073       10044 : }
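                      : 
                      : /*
                      :  * Illustrative sketch (not part of nbtinsert.c): _bt_stepright()'s lock
                      :  * ordering is classic "lock coupling": write-lock the right sibling
                      :  * before releasing the current page, so that a concurrent
                      :  * _bt_check_unique() scan can never slip through the gap between the
                      :  * two pages.  A minimal standalone analog on a linked list, with a
                      :  * pthread mutex standing in for each page's write lock (the ListPage
                      :  * type and step_right() name are hypothetical):
                      :  */
                      : #include <pthread.h>
                      : #include <stddef.h>
                      : 
                      : typedef struct ListPage
                      : {
                      :     pthread_mutex_t lock;   /* per-page "write lock" */
                      :     struct ListPage *next;  /* analog of btpo_next */
                      : } ListPage;
                      : 
                      : /* Step from 'cur', which the caller has locked, to its right sibling. */
                      : static ListPage *
                      : step_right(ListPage *cur)
                      : {
                      :     ListPage   *next = cur->next;
                      : 
                      :     if (next == NULL)
                      :         return cur;         /* rightmost page; keep current lock */
                      : 
                      :     pthread_mutex_lock(&next->lock);    /* lock target first ... */
                      :     pthread_mutex_unlock(&cur->lock);   /* ... then release current */
                      :     return next;
                      : }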
    1074             : 
    1075             : /*----------
    1076             :  *  _bt_insertonpg() -- Insert a tuple on a particular page in the index.
    1077             :  *
    1078             :  *      This recursive procedure does the following things:
    1079             :  *
    1080             :  *          +  if postingoff != 0, splits existing posting list tuple
    1081             :  *             (since it overlaps with new 'itup' tuple).
    1082             :  *          +  if necessary, splits the target page, using 'itup_key' for
    1083             :  *             suffix truncation on leaf pages (caller passes NULL for
    1084             :  *             non-leaf pages).
    1085             :  *          +  inserts the new tuple (might be split from posting list).
    1086             :  *          +  if the page was split, pops the parent stack, and finds the
    1087             :  *             right place to insert the new child pointer (by walking
    1088             :  *             right using information stored in the parent stack).
    1089             :  *          +  invokes itself with the appropriate tuple for the right
    1090             :  *             child page on the parent.
    1091             :  *          +  updates the metapage if a true root or fast root is split.
    1092             :  *
    1093             :  *      On entry, we must have the correct buffer in which to do the
    1094             :  *      insertion, and the buffer must be pinned and write-locked.  On return,
    1095             :  *      we will have dropped both the pin and the lock on the buffer.
    1096             :  *
    1097             :  *      This routine only performs retail tuple insertions.  'itup' should
    1098             :  *      always be either a non-highkey leaf item, or a downlink (new high
    1099             :  *      key items are created indirectly, when a page is split).  When
    1100             :  *      inserting to a non-leaf page, 'cbuf' is the left-sibling of the page
    1101             :  *      we're inserting the downlink for.  This function will clear the
    1102             :  *      INCOMPLETE_SPLIT flag on it, and release the buffer.
    1103             :  *----------
    1104             :  */
    1105             : static void
    1106     7320988 : _bt_insertonpg(Relation rel,
    1107             :                Relation heaprel,
    1108             :                BTScanInsert itup_key,
    1109             :                Buffer buf,
    1110             :                Buffer cbuf,
    1111             :                BTStack stack,
    1112             :                IndexTuple itup,
    1113             :                Size itemsz,
    1114             :                OffsetNumber newitemoff,
    1115             :                int postingoff,
    1116             :                bool split_only_page)
    1117             : {
    1118             :     Page        page;
    1119             :     BTPageOpaque opaque;
    1120             :     bool        isleaf,
    1121             :                 isroot,
    1122             :                 isrightmost,
    1123             :                 isonly;
    1124     7320988 :     IndexTuple  oposting = NULL;
    1125     7320988 :     IndexTuple  origitup = NULL;
    1126     7320988 :     IndexTuple  nposting = NULL;
    1127             : 
    1128     7320988 :     page = BufferGetPage(buf);
    1129     7320988 :     opaque = BTPageGetOpaque(page);
    1130     7320988 :     isleaf = P_ISLEAF(opaque);
    1131     7320988 :     isroot = P_ISROOT(opaque);
    1132     7320988 :     isrightmost = P_RIGHTMOST(opaque);
    1133     7320988 :     isonly = P_LEFTMOST(opaque) && P_RIGHTMOST(opaque);
    1134             : 
    1135             :     /* child buffer must be given iff inserting on an internal page */
    1136             :     Assert(isleaf == !BufferIsValid(cbuf));
    1137             :     /* tuple must have appropriate number of attributes */
    1138             :     Assert(!isleaf ||
    1139             :            BTreeTupleGetNAtts(itup, rel) ==
    1140             :            IndexRelationGetNumberOfAttributes(rel));
    1141             :     Assert(isleaf ||
    1142             :            BTreeTupleGetNAtts(itup, rel) <=
    1143             :            IndexRelationGetNumberOfKeyAttributes(rel));
    1144             :     Assert(!BTreeTupleIsPosting(itup));
    1145             :     Assert(MAXALIGN(IndexTupleSize(itup)) == itemsz);
    1146             :     /* Caller must always finish incomplete split for us */
    1147             :     Assert(!P_INCOMPLETE_SPLIT(opaque));
    1148             : 
    1149             :     /*
    1150             :      * Every internal page should have exactly one negative infinity item at
    1151             :      * all times.  Only _bt_split() and _bt_newlevel() should add items that
    1152             :      * become negative infinity items through truncation, since they're the
    1153             :      * only routines that allocate new internal pages.
    1154             :      */
    1155             :     Assert(isleaf || newitemoff > P_FIRSTDATAKEY(opaque));
    1156             : 
    1157             :     /*
    1158             :      * Do we need to split an existing posting list item?
    1159             :      */
    1160     7320988 :     if (postingoff != 0)
    1161             :     {
    1162       22442 :         ItemId      itemid = PageGetItemId(page, newitemoff);
    1163             : 
    1164             :         /*
    1165             :          * The new tuple is a duplicate with a heap TID that falls inside the
    1166             :          * range of an existing posting list tuple on a leaf page.  Prepare to
    1167             :          * split an existing posting list.  Overwriting the posting list with
    1168             :          * its post-split version is treated as an extra step in either the
    1169             :          * insert or page split critical section.
     1170             :          * insert or page split critical section.  (A standalone sketch of
                      :          * the posting list swap follows this function.)
    1171             :         Assert(isleaf && itup_key->heapkeyspace && itup_key->allequalimage);
    1172       22442 :         oposting = (IndexTuple) PageGetItem(page, itemid);
    1173             : 
    1174             :         /*
    1175             :          * postingoff value comes from earlier call to _bt_binsrch_posting().
    1176             :          * Its binary search might think that a plain tuple must be a posting
    1177             :          * list tuple that needs to be split.  This can happen with corruption
    1178             :          * involving an existing plain tuple that is a duplicate of the new
    1179             :          * item, up to and including its table TID.  Check for that here in
    1180             :          * passing.
    1181             :          *
    1182             :          * Also verify that our caller has made sure that the existing posting
    1183             :          * list tuple does not have its LP_DEAD bit set.
    1184             :          */
    1185       22442 :         if (!BTreeTupleIsPosting(oposting) || ItemIdIsDead(itemid))
    1186           0 :             ereport(ERROR,
    1187             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    1188             :                      errmsg_internal("table tid from new index tuple (%u,%u) overlaps with invalid duplicate tuple at offset %u of block %u in index \"%s\"",
    1189             :                                      ItemPointerGetBlockNumber(&itup->t_tid),
    1190             :                                      ItemPointerGetOffsetNumber(&itup->t_tid),
    1191             :                                      newitemoff, BufferGetBlockNumber(buf),
    1192             :                                      RelationGetRelationName(rel))));
    1193             : 
    1194             :         /* use a mutable copy of itup as our itup from here on */
    1195       22442 :         origitup = itup;
    1196       22442 :         itup = CopyIndexTuple(origitup);
    1197       22442 :         nposting = _bt_swap_posting(itup, oposting, postingoff);
    1198             :         /* itup now contains rightmost/max TID from oposting */
    1199             : 
    1200             :         /* Alter offset so that newitem goes after posting list */
    1201       22442 :         newitemoff = OffsetNumberNext(newitemoff);
    1202             :     }
    1203             : 
    1204             :     /*
    1205             :      * Do we need to split the page to fit the item on it?
    1206             :      *
    1207             :      * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result,
    1208             :      * so this comparison is correct even though we appear to be accounting
    1209             :      * only for the item and not for its line pointer.
    1210             :      */
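                      : 
                      :     /*
                      :      * Illustrative arithmetic (assumed values): if the page has 24 raw
                      :      * bytes left and itemsz = 24, PageGetFreeSpace() reports 24 - 4 = 20
                      :      * (sizeof(ItemIdData) == 4), so "20 < 24" correctly forces a split:
                      :      * the tuple plus its line pointer would need 28 bytes.
                      :      */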
    1211     7320988 :     if (PageGetFreeSpace(page) < itemsz)
    1212             :     {
    1213             :         Buffer      rbuf;
    1214             : 
    1215             :         Assert(!split_only_page);
    1216             : 
    1217             :         /* split the buffer into left and right halves */
    1218       22580 :         rbuf = _bt_split(rel, heaprel, itup_key, buf, cbuf, newitemoff, itemsz,
    1219             :                          itup, origitup, nposting, postingoff);
    1220       22580 :         PredicateLockPageSplit(rel,
    1221             :                                BufferGetBlockNumber(buf),
    1222             :                                BufferGetBlockNumber(rbuf));
    1223             : 
    1224             :         /*----------
    1225             :          * By here,
    1226             :          *
    1227             :          *      +  our target page has been split;
    1228             :          *      +  the original tuple has been inserted;
    1229             :          *      +  we have write locks on both the old (left half)
    1230             :          *         and new (right half) buffers, after the split; and
    1231             :          *      +  we know the key we want to insert into the parent
    1232             :          *         (it's the "high key" on the left child page).
    1233             :          *
    1234             :          * We're ready to do the parent insertion.  We need to hold onto the
    1235             :          * locks for the child pages until we locate the parent, but we can
    1236             :          * at least release the lock on the right child before doing the
    1237             :          * actual insertion.  The lock on the left child will be released
    1238             :          * last of all by parent insertion, where it is the 'cbuf' of parent
    1239             :          * page.
    1240             :          *----------
    1241             :          */
    1242       22580 :         _bt_insert_parent(rel, heaprel, buf, rbuf, stack, isroot, isonly);
    1243             :     }
    1244             :     else
    1245             :     {
    1246     7298408 :         Buffer      metabuf = InvalidBuffer;
    1247     7298408 :         Page        metapg = NULL;
    1248     7298408 :         BTMetaPageData *metad = NULL;
    1249             :         BlockNumber blockcache;
    1250             : 
    1251             :         /*
    1252             :          * If we are doing this insert because we split a page that was the
    1253             :          * only one on its tree level, but was not the root, it may have been
    1254             :          * the "fast root".  We need to ensure that the fast root link points
    1255             :          * at or above the current page.  We can safely acquire a lock on the
    1256             :          * metapage here --- see comments for _bt_newlevel().
    1257             :          */
    1258     7298408 :         if (unlikely(split_only_page))
    1259             :         {
    1260             :             Assert(!isleaf);
    1261             :             Assert(BufferIsValid(cbuf));
    1262             : 
    1263          22 :             metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
    1264          22 :             metapg = BufferGetPage(metabuf);
    1265          22 :             metad = BTPageGetMeta(metapg);
    1266             : 
    1267          22 :             if (metad->btm_fastlevel >= opaque->btpo_level)
    1268             :             {
    1269             :                 /* no update wanted */
    1270           0 :                 _bt_relbuf(rel, metabuf);
    1271           0 :                 metabuf = InvalidBuffer;
    1272             :             }
    1273             :         }
    1274             : 
    1275             :         /* Do the update.  No ereport(ERROR) until changes are logged */
    1276     7298408 :         START_CRIT_SECTION();
    1277             : 
    1278     7298408 :         if (postingoff != 0)
    1279       22374 :             memcpy(oposting, nposting, MAXALIGN(IndexTupleSize(nposting)));
    1280             : 
    1281     7298408 :         if (PageAddItem(page, itup, itemsz, newitemoff, false, false) == InvalidOffsetNumber)
    1282           0 :             elog(PANIC, "failed to add new item to block %u in index \"%s\"",
    1283             :                  BufferGetBlockNumber(buf), RelationGetRelationName(rel));
    1284             : 
    1285     7298408 :         MarkBufferDirty(buf);
    1286             : 
    1287     7298408 :         if (BufferIsValid(metabuf))
    1288             :         {
    1289             :             /* upgrade meta-page if needed */
    1290          22 :             if (metad->btm_version < BTREE_NOVAC_VERSION)
    1291           0 :                 _bt_upgrademetapage(metapg);
    1292          22 :             metad->btm_fastroot = BufferGetBlockNumber(buf);
    1293          22 :             metad->btm_fastlevel = opaque->btpo_level;
    1294          22 :             MarkBufferDirty(metabuf);
    1295             :         }
    1296             : 
    1297             :         /*
    1298             :          * Clear INCOMPLETE_SPLIT flag on child if inserting the new item
    1299             :          * finishes a split
    1300             :          */
    1301     7298408 :         if (!isleaf)
    1302             :         {
    1303       21014 :             Page        cpage = BufferGetPage(cbuf);
    1304       21014 :             BTPageOpaque cpageop = BTPageGetOpaque(cpage);
    1305             : 
    1306             :             Assert(P_INCOMPLETE_SPLIT(cpageop));
    1307       21014 :             cpageop->btpo_flags &= ~BTP_INCOMPLETE_SPLIT;
    1308       21014 :             MarkBufferDirty(cbuf);
    1309             :         }
    1310             : 
    1311             :         /* XLOG stuff */
    1312     7298408 :         if (RelationNeedsWAL(rel))
    1313             :         {
    1314             :             xl_btree_insert xlrec;
    1315             :             xl_btree_metadata xlmeta;
    1316             :             uint8       xlinfo;
    1317             :             XLogRecPtr  recptr;
    1318             :             uint16      upostingoff;
    1319             : 
    1320     6792376 :             xlrec.offnum = newitemoff;
    1321             : 
    1322     6792376 :             XLogBeginInsert();
    1323     6792376 :             XLogRegisterData(&xlrec, SizeOfBtreeInsert);
    1324             : 
    1325     6792376 :             if (isleaf && postingoff == 0)
    1326             :             {
    1327             :                 /* Simple leaf insert */
    1328     6750242 :                 xlinfo = XLOG_BTREE_INSERT_LEAF;
    1329             :             }
    1330       42134 :             else if (postingoff != 0)
    1331             :             {
    1332             :                 /*
    1333             :                  * Leaf insert with posting list split.  Must include
    1334             :                  * postingoff field before newitem/orignewitem.
    1335             :                  */
    1336             :                 Assert(isleaf);
    1337       22374 :                 xlinfo = XLOG_BTREE_INSERT_POST;
    1338             :             }
    1339             :             else
    1340             :             {
    1341             :                 /* Internal page insert, which finishes a split on cbuf */
    1342       19760 :                 xlinfo = XLOG_BTREE_INSERT_UPPER;
    1343       19760 :                 XLogRegisterBuffer(1, cbuf, REGBUF_STANDARD);
    1344             : 
    1345       19760 :                 if (BufferIsValid(metabuf))
    1346             :                 {
    1347             :                     /* Actually, it's an internal page insert + meta update */
    1348          22 :                     xlinfo = XLOG_BTREE_INSERT_META;
    1349             : 
    1350             :                     Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
    1351          22 :                     xlmeta.version = metad->btm_version;
    1352          22 :                     xlmeta.root = metad->btm_root;
    1353          22 :                     xlmeta.level = metad->btm_level;
    1354          22 :                     xlmeta.fastroot = metad->btm_fastroot;
    1355          22 :                     xlmeta.fastlevel = metad->btm_fastlevel;
    1356          22 :                     xlmeta.last_cleanup_num_delpages = metad->btm_last_cleanup_num_delpages;
    1357          22 :                     xlmeta.allequalimage = metad->btm_allequalimage;
    1358             : 
    1359          22 :                     XLogRegisterBuffer(2, metabuf,
    1360             :                                        REGBUF_WILL_INIT | REGBUF_STANDARD);
    1361          22 :                     XLogRegisterBufData(2, &xlmeta,
    1362             :                                         sizeof(xl_btree_metadata));
    1363             :                 }
    1364             :             }
    1365             : 
    1366     6792376 :             XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
    1367     6792376 :             if (postingoff == 0)
    1368             :             {
    1369             :                 /* Just log itup from caller */
    1370     6770002 :                 XLogRegisterBufData(0, itup, IndexTupleSize(itup));
    1371             :             }
    1372             :             else
    1373             :             {
    1374             :                 /*
    1375             :                  * Insert with posting list split (XLOG_BTREE_INSERT_POST
    1376             :                  * record) case.
    1377             :                  *
    1378             :                  * Log postingoff.  Also log origitup, not itup.  REDO routine
    1379             :                  * must reconstruct final itup (as well as nposting) using
    1380             :                  * _bt_swap_posting().
    1381             :                  */
    1382       22374 :                 upostingoff = postingoff;
    1383             : 
    1384       22374 :                 XLogRegisterBufData(0, &upostingoff, sizeof(uint16));
    1385       22374 :                 XLogRegisterBufData(0, origitup,
    1386       22374 :                                     IndexTupleSize(origitup));
    1387             :             }
    1388             : 
    1389     6792376 :             recptr = XLogInsert(RM_BTREE_ID, xlinfo);
    1390             : 
    1391     6792376 :             if (BufferIsValid(metabuf))
    1392          22 :                 PageSetLSN(metapg, recptr);
    1393     6792376 :             if (!isleaf)
    1394       19760 :                 PageSetLSN(BufferGetPage(cbuf), recptr);
    1395             : 
    1396     6792376 :             PageSetLSN(page, recptr);
    1397             :         }
    1398             : 
    1399     7298408 :         END_CRIT_SECTION();
    1400             : 
    1401             :         /* Release subsidiary buffers */
    1402     7298408 :         if (BufferIsValid(metabuf))
    1403          22 :             _bt_relbuf(rel, metabuf);
    1404     7298408 :         if (!isleaf)
    1405       21014 :             _bt_relbuf(rel, cbuf);
    1406             : 
    1407             :         /*
    1408             :          * Cache the block number if this is the rightmost leaf page.  Cache
    1409             :          * may be used by a future inserter within _bt_search_insert().
    1410             :          */
    1411     7298408 :         blockcache = InvalidBlockNumber;
    1412     7298408 :         if (isrightmost && isleaf && !isroot)
    1413     4069628 :             blockcache = BufferGetBlockNumber(buf);
    1414             : 
    1415             :         /* Release buffer for insertion target block */
    1416     7298408 :         _bt_relbuf(rel, buf);
    1417             : 
    1418             :         /*
    1419             :          * If we decided to cache the insertion target block before releasing
    1420             :          * its buffer lock, then cache it now.  Check the height of the tree
    1421             :          * first, though.  We don't go for the optimization with small
    1422             :          * indexes.  Defer final check to this point to ensure that we don't
    1423             :          * call _bt_getrootheight while holding a buffer lock.
    1424             :          */
    1425    11368036 :         if (BlockNumberIsValid(blockcache) &&
    1426     4069628 :             _bt_getrootheight(rel) >= BTREE_FASTPATH_MIN_LEVEL)
    1427       69380 :             RelationSetTargetBlock(rel, blockcache);
    1428             :     }
    1429             : 
    1430             :     /* be tidy */
    1431     7320988 :     if (postingoff != 0)
    1432             :     {
    1433             :         /* itup is actually a modified copy of caller's original */
    1434       22442 :         pfree(nposting);
    1435       22442 :         pfree(itup);
    1436             :     }
    1437     7320988 : }
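                      : 
                      : /*
                      :  * Illustrative sketch (not part of nbtinsert.c): the posting list swap
                      :  * performed for the postingoff != 0 case above, reduced to plain
                      :  * integers in place of heap TIDs (the swap_posting() name here is
                      :  * hypothetical).  The posting list keeps its original width: the new
                      :  * value is inserted at postingoff, and the old maximum is evicted to
                      :  * become the item that is physically added to the page.  Example:
                      :  * posting = {1,3,7,9}, *newval = 5, postingoff = 2 leaves
                      :  * posting = {1,3,5,7} and *newval = 9.
                      :  */
                      : #include <assert.h>
                      : #include <string.h>
                      : 
                      : static void
                      : swap_posting(int *posting, int nitems, int postingoff, int *newval)
                      : {
                      :     int         oldmax = posting[nitems - 1];
                      : 
                      :     assert(postingoff > 0 && postingoff < nitems);
                      :     assert(*newval < oldmax);
                      : 
                      :     /* shift the tail right by one, overwriting the old maximum */
                      :     memmove(&posting[postingoff + 1], &posting[postingoff],
                      :             (nitems - postingoff - 1) * sizeof(int));
                      :     posting[postingoff] = *newval;  /* new value fills the gap */
                      :     *newval = oldmax;           /* caller inserts old max instead */
                      : }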
    1438             : 
    1439             : /*
    1440             :  *  _bt_split() -- split a page in the btree.
    1441             :  *
    1442             :  *      On entry, buf is the page to split, and is pinned and write-locked.
    1443             :  *      newitemoff etc. tell us about the new item that must be inserted
    1444             :  *      along with the data from the original page.
    1445             :  *
    1446             :  *      itup_key is used for suffix truncation on leaf pages (internal
    1447             :  *      page callers pass NULL).  When splitting a non-leaf page, 'cbuf'
    1448             :  *      is the left-sibling of the page we're inserting the downlink for.
    1449             :  *      This function will clear the INCOMPLETE_SPLIT flag on it, and
    1450             :  *      release the buffer.
    1451             :  *
    1452             :  *      orignewitem, nposting, and postingoff are needed when an insert of
    1453             :  *      orignewitem results in both a posting list split and a page split.
    1454             :  *      These extra posting list split details are used here in the same
    1455             :  *      way as they are used in the more common case where a posting list
    1456             :  *      split does not coincide with a page split.  We need to deal with
    1457             :  *      posting list splits directly in order to ensure that everything
    1458             :  *      that follows from the insert of orignewitem is handled as a single
    1459             :  *      atomic operation (though caller's insert of a new pivot/downlink
    1460             :  *      into parent page will still be a separate operation).  See
    1461             :  *      nbtree/README for details on the design of posting list splits.
    1462             :  *
    1463             :  *      Returns the new right sibling of buf, pinned and write-locked.
    1464             :  *      The pin and lock on buf are maintained.
    1465             :  */
    1466             : static Buffer
    1467       22580 : _bt_split(Relation rel, Relation heaprel, BTScanInsert itup_key, Buffer buf,
    1468             :           Buffer cbuf, OffsetNumber newitemoff, Size newitemsz, IndexTuple newitem,
    1469             :           IndexTuple orignewitem, IndexTuple nposting, uint16 postingoff)
    1470             : {
    1471             :     Buffer      rbuf;
    1472             :     Page        origpage;
    1473             :     Page        leftpage,
    1474             :                 rightpage;
    1475             :     PGAlignedBlock leftpage_buf,
    1476             :                 rightpage_buf;
    1477             :     BlockNumber origpagenumber,
    1478             :                 rightpagenumber;
    1479             :     BTPageOpaque ropaque,
    1480             :                 lopaque,
    1481             :                 oopaque;
    1482       22580 :     Buffer      sbuf = InvalidBuffer;
    1483       22580 :     Page        spage = NULL;
    1484       22580 :     BTPageOpaque sopaque = NULL;
    1485             :     Size        itemsz;
    1486             :     ItemId      itemid;
    1487             :     IndexTuple  firstright,
    1488             :                 lefthighkey;
    1489             :     OffsetNumber firstrightoff;
    1490             :     OffsetNumber afterleftoff,
    1491             :                 afterrightoff,
    1492             :                 minusinfoff;
    1493             :     OffsetNumber origpagepostingoff;
    1494             :     OffsetNumber maxoff;
    1495             :     OffsetNumber i;
    1496             :     bool        newitemonleft,
    1497             :                 isleaf,
    1498             :                 isrightmost;
    1499             : 
    1500             :     /*
    1501             :      * origpage is the original page to be split.  leftpage is a temporary
    1502             :      * buffer that receives the left-sibling data, which will be copied back
    1503             :      * into origpage on success.  rightpage is the new page that will receive
    1504             :      * the right-sibling data.
    1505             :      *
    1506             :      * leftpage is allocated after choosing a split point.  rightpage's new
     1507             :      * buffer isn't acquired until after leftpage is initialized and has its
     1508             :      * new high key, the last point where splitting the page may fail (barring
     1509             :      * corruption).  Failing before acquiring the new buffer won't have lasting
    1510             :      * consequences, since origpage won't have been modified and leftpage is
    1511             :      * only workspace.
    1512             :      */
    1513       22580 :     origpage = BufferGetPage(buf);
    1514       22580 :     oopaque = BTPageGetOpaque(origpage);
    1515       22580 :     isleaf = P_ISLEAF(oopaque);
    1516       22580 :     isrightmost = P_RIGHTMOST(oopaque);
    1517       22580 :     maxoff = PageGetMaxOffsetNumber(origpage);
    1518       22580 :     origpagenumber = BufferGetBlockNumber(buf);
    1519             : 
    1520             :     /*
    1521             :      * Choose a point to split origpage at.
    1522             :      *
    1523             :      * A split point can be thought of as a point _between_ two existing data
    1524             :      * items on origpage (the lastleft and firstright tuples), provided you
    1525             :      * pretend that the new item that didn't fit is already on origpage.
    1526             :      *
    1527             :      * Since origpage does not actually contain newitem, the representation of
    1528             :      * split points needs to work with two boundary cases: splits where
    1529             :      * newitem is lastleft, and splits where newitem is firstright.
    1530             :      * newitemonleft resolves the ambiguity that would otherwise exist when
    1531             :      * newitemoff == firstrightoff.  In all other cases it's clear which side
    1532             :      * of the split every tuple goes on from context.  newitemonleft is
    1533             :      * usually (but not always) redundant information.
    1534             :      *
    1535             :      * firstrightoff is supposed to be an origpage offset number, but it's
    1536             :      * possible that its value will be maxoff+1, which is "past the end" of
    1537             :      * origpage.  This happens in the rare case where newitem goes after all
    1538             :      * existing items (i.e. newitemoff is maxoff+1) and we end up splitting
    1539             :      * origpage at the point that leaves newitem alone on new right page.  Any
    1540             :      * "!newitemonleft && newitemoff == firstrightoff" split point makes
    1541             :      * newitem the firstright tuple, though, so this case isn't a special
    1542             :      * case.
    1543             :      */
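                      : 
                      :     /*
                      :      * For example (illustrative values): with maxoff = 10 and
                      :      * newitemoff = 11, _bt_findsplitloc() may return firstrightoff = 11
                      :      * with newitemonleft = false -- the "past the end" case above, where
                      :      * every existing tuple stays on the left page and newitem becomes the
                      :      * lone data item on the new right page.
                      :      */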
    1544       22580 :     firstrightoff = _bt_findsplitloc(rel, origpage, newitemoff, newitemsz,
    1545             :                                      newitem, &newitemonleft);
    1546             : 
    1547             :     /* Use temporary buffer for leftpage */
    1548       22580 :     leftpage = leftpage_buf.data;
    1549       22580 :     _bt_pageinit(leftpage, BufferGetPageSize(buf));
    1550       22580 :     lopaque = BTPageGetOpaque(leftpage);
    1551             : 
    1552             :     /*
    1553             :      * leftpage won't be the root when we're done.  Also, clear the SPLIT_END
    1554             :      * and HAS_GARBAGE flags.
    1555             :      */
    1556       22580 :     lopaque->btpo_flags = oopaque->btpo_flags;
    1557       22580 :     lopaque->btpo_flags &= ~(BTP_ROOT | BTP_SPLIT_END | BTP_HAS_GARBAGE);
    1558             :     /* set flag in leftpage indicating that rightpage has no downlink yet */
    1559       22580 :     lopaque->btpo_flags |= BTP_INCOMPLETE_SPLIT;
    1560       22580 :     lopaque->btpo_prev = oopaque->btpo_prev;
    1561             :     /* handle btpo_next after rightpage buffer acquired */
    1562       22580 :     lopaque->btpo_level = oopaque->btpo_level;
    1563             :     /* handle btpo_cycleid after rightpage buffer acquired */
    1564             : 
    1565             :     /*
    1566             :      * Copy the original page's LSN into leftpage, which will become the
    1567             :      * updated version of the page.  We need this because XLogInsert will
    1568             :      * examine the LSN and possibly dump it in a page image.
    1569             :      */
    1570       22580 :     PageSetLSN(leftpage, PageGetLSN(origpage));
    1571             : 
    1572             :     /*
    1573             :      * Determine page offset number of existing overlapped-with-orignewitem
    1574             :      * posting list when it is necessary to perform a posting list split in
    1575             :      * passing.  Note that newitem was already changed by caller (newitem no
    1576             :      * longer has the orignewitem TID).
    1577             :      *
    1578             :      * This page offset number (origpagepostingoff) will be used to pretend
    1579             :      * that the posting split has already taken place, even though the
    1580             :      * required modifications to origpage won't occur until we reach the
    1581             :      * critical section.  The lastleft and firstright tuples of our page split
    1582             :      * point should, in effect, come from an imaginary version of origpage
    1583             :      * that has the nposting tuple instead of the original posting list tuple.
    1584             :      *
    1585             :      * Note: _bt_findsplitloc() should have compensated for coinciding posting
    1586             :      * list splits in just the same way, at least in theory.  It doesn't
    1587             :      * bother with that, though.  In practice it won't affect its choice of
    1588             :      * split point.
    1589             :      */
    1590       22580 :     origpagepostingoff = InvalidOffsetNumber;
    1591       22580 :     if (postingoff != 0)
    1592             :     {
    1593             :         Assert(isleaf);
    1594             :         Assert(ItemPointerCompare(&orignewitem->t_tid,
    1595             :                                   &newitem->t_tid) < 0);
    1596             :         Assert(BTreeTupleIsPosting(nposting));
    1597          68 :         origpagepostingoff = OffsetNumberPrev(newitemoff);
    1598             :     }
    1599             : 
    1600             :     /*
    1601             :      * The high key for the new left page is a possibly-truncated copy of
    1602             :      * firstright on the leaf level (it's "firstright itself" on internal
    1603             :      * pages; see !isleaf comments below).  This may seem to be contrary to
    1604             :      * Lehman & Yao's approach of using a copy of lastleft as the new high key
    1605             :      * when splitting on the leaf level.  It isn't, though.
    1606             :      *
    1607             :      * Suffix truncation will leave the left page's high key fully equal to
    1608             :      * lastleft when lastleft and firstright are equal prior to heap TID (that
    1609             :      * is, the tiebreaker TID value comes from lastleft).  It isn't actually
    1610             :      * necessary for a new leaf high key to be a copy of lastleft for the L&Y
    1611             :      * "subtree" invariant to hold.  It's sufficient to make sure that the new
    1612             :      * leaf high key is strictly less than firstright, and greater than or
    1613             :      * equal to (not necessarily equal to) lastleft.  In other words, when
    1614             :      * suffix truncation isn't possible during a leaf page split, we take
    1615             :      * L&Y's exact approach to generating a new high key for the left page.
    1616             :      * (Actually, that is slightly inaccurate.  We don't just use a copy of
    1617             :      * lastleft.  A tuple with all the keys from firstright but the max heap
    1618             :      * TID from lastleft is used, to avoid introducing a special case.)
    1619             :      */
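                      : 
                      :     /*
                      :      * A worked example of the above (illustrative values): in an index on
                      :      * (a, b, c), splitting between lastleft = (5, 3, 9) and firstright =
                      :      * (5, 7, 1) lets _bt_truncate() emit the pivot (5, 7).  Attribute c is
                      :      * truncated away, and truncated attributes compare as "minus
                      :      * infinity", so the new high key sorts after lastleft and before
                      :      * firstright.  Only when the two tuples are equal on every key
                      :      * attribute must the pivot keep all three values, plus lastleft's max
                      :      * heap TID as the tiebreaker.
                      :      */
                      : 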
    1620       22580 :     if (!newitemonleft && newitemoff == firstrightoff)
    1621             :     {
    1622             :         /* incoming tuple becomes firstright */
    1623          30 :         itemsz = newitemsz;
    1624          30 :         firstright = newitem;
    1625             :     }
    1626             :     else
    1627             :     {
    1628             :         /* existing item at firstrightoff becomes firstright */
    1629       22550 :         itemid = PageGetItemId(origpage, firstrightoff);
    1630       22550 :         itemsz = ItemIdGetLength(itemid);
    1631       22550 :         firstright = (IndexTuple) PageGetItem(origpage, itemid);
    1632       22550 :         if (firstrightoff == origpagepostingoff)
    1633           0 :             firstright = nposting;
    1634             :     }
    1635             : 
    1636       22580 :     if (isleaf)
    1637             :     {
    1638             :         IndexTuple  lastleft;
    1639             : 
    1640             :         /* Attempt suffix truncation for leaf page splits */
    1641       22378 :         if (newitemonleft && newitemoff == firstrightoff)
    1642             :         {
    1643             :             /* incoming tuple becomes lastleft */
    1644         426 :             lastleft = newitem;
    1645             :         }
    1646             :         else
    1647             :         {
    1648             :             OffsetNumber lastleftoff;
    1649             : 
    1650             :             /* existing item before firstrightoff becomes lastleft */
    1651       21952 :             lastleftoff = OffsetNumberPrev(firstrightoff);
    1652             :             Assert(lastleftoff >= P_FIRSTDATAKEY(oopaque));
    1653       21952 :             itemid = PageGetItemId(origpage, lastleftoff);
    1654       21952 :             lastleft = (IndexTuple) PageGetItem(origpage, itemid);
    1655       21952 :             if (lastleftoff == origpagepostingoff)
    1656           6 :                 lastleft = nposting;
    1657             :         }
    1658             : 
    1659       22378 :         lefthighkey = _bt_truncate(rel, lastleft, firstright, itup_key);
    1660       22378 :         itemsz = IndexTupleSize(lefthighkey);
    1661             :     }
    1662             :     else
    1663             :     {
    1664             :         /*
    1665             :          * Don't perform suffix truncation on a copy of firstright to make
    1666             :          * left page high key for internal page splits.  Must use firstright
    1667             :          * as new high key directly.
    1668             :          *
    1669             :          * Each distinct separator key value originates as a leaf level high
    1670             :          * key; all other separator keys/pivot tuples are copied from one
    1671             :          * level down.  A separator key in a grandparent page must be
    1672             :          * identical to high key in rightmost parent page of the subtree to
    1673             :          * its left, which must itself be identical to high key in rightmost
    1674             :          * child page of that same subtree (this even applies to separator
    1675             :          * from grandparent's high key).  There must always be an unbroken
    1676             :          * "seam" of identical separator keys that guide index scans at every
    1677             :          * level, starting from the grandparent.  That's why suffix truncation
    1678             :          * is unsafe here.
    1679             :          *
    1680             :          * Internal page splits will truncate firstright into a "negative
    1681             :          * infinity" data item when it gets inserted on the new right page
    1682             :          * below, though.  This happens during the call to _bt_pgaddtup() for
    1683             :          * the new first data item for right page.  Do not confuse this
    1684             :          * mechanism with suffix truncation.  It is just a convenient way of
    1685             :          * implementing page splits that split the internal page "inside"
    1686             :          * firstright.  The lefthighkey separator key cannot appear a second
    1687             :          * time in the right page (only firstright's downlink goes in right
    1688             :          * page).
    1689             :          */
    1690         202 :         lefthighkey = firstright;
    1691             :     }
    1692             : 
    1693             :     /*
    1694             :      * Add new high key to leftpage
    1695             :      */
    1696       22580 :     afterleftoff = P_HIKEY;
    1697             : 
    1698             :     Assert(BTreeTupleGetNAtts(lefthighkey, rel) > 0);
    1699             :     Assert(BTreeTupleGetNAtts(lefthighkey, rel) <=
    1700             :            IndexRelationGetNumberOfKeyAttributes(rel));
    1701             :     Assert(itemsz == MAXALIGN(IndexTupleSize(lefthighkey)));
    1702       22580 :     if (PageAddItem(leftpage, lefthighkey, itemsz, afterleftoff, false, false) == InvalidOffsetNumber)
    1703           0 :         elog(ERROR, "failed to add high key to the left sibling"
    1704             :              " while splitting block %u of index \"%s\"",
    1705             :              origpagenumber, RelationGetRelationName(rel));
    1706       22580 :     afterleftoff = OffsetNumberNext(afterleftoff);
    1707             : 
    1708             :     /*
     1709             :      * Acquire a new right page to split into, now that the left page has its
    1710             :      * high key.
    1711             :      *
    1712             :      * To not confuse future VACUUM operations, we zero the right page and
    1713             :      * work on an in-memory copy of it before writing WAL, then copy its
    1714             :      * contents back to the actual page once we start the critical section
     1715             :      * work.  This simplifies the split logic, since there is no need to zero
    1716             :      * the right page before throwing an error.
    1717             :      */
    1718       22580 :     rbuf = _bt_allocbuf(rel, heaprel);
    1719       22580 :     rightpage = rightpage_buf.data;
    1720             : 
    1721             :     /*
    1722             :      * Copy the contents of the right page into its temporary location, and
    1723             :      * zero the original space.
    1724             :      */
    1725       22580 :     memcpy(rightpage, BufferGetPage(rbuf), BLCKSZ);
    1726       22580 :     memset(BufferGetPage(rbuf), 0, BLCKSZ);
    1727       22580 :     rightpagenumber = BufferGetBlockNumber(rbuf);
     1728             :     /* rightpage (the temporary copy) was initialized by _bt_allocbuf */
    1729       22580 :     ropaque = BTPageGetOpaque(rightpage);
    1730             : 
    1731             :     /*
    1732             :      * Finish off remaining leftpage special area fields.  They cannot be set
    1733             :      * before both origpage (leftpage) and rightpage buffers are acquired and
    1734             :      * locked.
    1735             :      *
    1736             :      * btpo_cycleid is only used with leaf pages, though we set it here in all
    1737             :      * cases just to be consistent.
    1738             :      */
    1739       22580 :     lopaque->btpo_next = rightpagenumber;
    1740       22580 :     lopaque->btpo_cycleid = _bt_vacuum_cycleid(rel);
    1741             : 
    1742             :     /*
    1743             :      * rightpage won't be the root when we're done.  Also, clear the SPLIT_END
    1744             :      * and HAS_GARBAGE flags.
    1745             :      */
    1746       22580 :     ropaque->btpo_flags = oopaque->btpo_flags;
    1747       22580 :     ropaque->btpo_flags &= ~(BTP_ROOT | BTP_SPLIT_END | BTP_HAS_GARBAGE);
    1748       22580 :     ropaque->btpo_prev = origpagenumber;
    1749       22580 :     ropaque->btpo_next = oopaque->btpo_next;
    1750       22580 :     ropaque->btpo_level = oopaque->btpo_level;
    1751       22580 :     ropaque->btpo_cycleid = lopaque->btpo_cycleid;
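                      : 
                      :     /*
                      :      * Sibling links at this point (schematically; "next" is the old right
                      :      * sibling, if any):
                      :      *
                      :      *   ... <-> orig/left <-> right <-> next <-> ...
                      :      *
                      :      * except that next's btpo_prev still points at origpagenumber; that is
                      :      * fixed up below, once the old right sibling has been locked.
                      :      */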
    1752             : 
    1753             :     /*
    1754             :      * Add new high key to rightpage where necessary.
    1755             :      *
    1756             :      * If the page we're splitting is not the rightmost page at its level in
    1757             :      * the tree, then the first entry on the page is the high key from
    1758             :      * origpage.
    1759             :      */
    1760       22580 :     afterrightoff = P_HIKEY;
    1761             : 
    1762       22580 :     if (!isrightmost)
    1763             :     {
    1764             :         IndexTuple  righthighkey;
    1765             : 
    1766        9714 :         itemid = PageGetItemId(origpage, P_HIKEY);
    1767        9714 :         itemsz = ItemIdGetLength(itemid);
    1768        9714 :         righthighkey = (IndexTuple) PageGetItem(origpage, itemid);
    1769             :         Assert(BTreeTupleGetNAtts(righthighkey, rel) > 0);
    1770             :         Assert(BTreeTupleGetNAtts(righthighkey, rel) <=
    1771             :                IndexRelationGetNumberOfKeyAttributes(rel));
    1772        9714 :         if (PageAddItem(rightpage, righthighkey, itemsz, afterrightoff, false, false) == InvalidOffsetNumber)
    1773             :         {
    1774           0 :             elog(ERROR, "failed to add high key to the right sibling"
    1775             :                  " while splitting block %u of index \"%s\"",
    1776             :                  origpagenumber, RelationGetRelationName(rel));
    1777             :         }
    1778        9714 :         afterrightoff = OffsetNumberNext(afterrightoff);
    1779             :     }
    1780             : 
    1781             :     /*
    1782             :      * Internal page splits truncate first data item on right page -- it
    1783             :      * becomes "minus infinity" item for the page.  Set this up here.
    1784             :      */
    1785       22580 :     minusinfoff = InvalidOffsetNumber;
    1786       22580 :     if (!isleaf)
    1787         202 :         minusinfoff = afterrightoff;
    1788             : 
    1789             :     /*
    1790             :      * Now transfer all the data items (non-pivot tuples in isleaf case, or
    1791             :      * additional pivot tuples in !isleaf case) to the appropriate page.
    1792             :      *
    1793             :      * Note: we *must* insert at least the right page's items in item-number
    1794             :      * order, for the benefit of _bt_restore_page().
    1795             :      */
    1796     6877204 :     for (i = P_FIRSTDATAKEY(oopaque); i <= maxoff; i = OffsetNumberNext(i))
    1797             :     {
    1798             :         IndexTuple  dataitem;
    1799             : 
    1800     6854624 :         itemid = PageGetItemId(origpage, i);
    1801     6854624 :         itemsz = ItemIdGetLength(itemid);
    1802     6854624 :         dataitem = (IndexTuple) PageGetItem(origpage, itemid);
    1803             : 
    1804             :         /* replace original item with nposting due to posting split? */
    1805     6854624 :         if (i == origpagepostingoff)
    1806             :         {
    1807             :             Assert(BTreeTupleIsPosting(dataitem));
    1808             :             Assert(itemsz == MAXALIGN(IndexTupleSize(nposting)));
    1809          68 :             dataitem = nposting;
    1810             :         }
    1811             : 
    1812             :         /* does new item belong before this one? */
    1813     6854556 :         else if (i == newitemoff)
    1814             :         {
    1815       12982 :             if (newitemonleft)
    1816             :             {
    1817             :                 Assert(newitemoff <= firstrightoff);
    1818        3378 :                 if (!_bt_pgaddtup(leftpage, newitemsz, newitem, afterleftoff,
    1819             :                                   false))
    1820             :                 {
    1821           0 :                     elog(ERROR, "failed to add new item to the left sibling"
    1822             :                          " while splitting block %u of index \"%s\"",
    1823             :                          origpagenumber, RelationGetRelationName(rel));
    1824             :                 }
    1825        3378 :                 afterleftoff = OffsetNumberNext(afterleftoff);
    1826             :             }
    1827             :             else
    1828             :             {
    1829             :                 Assert(newitemoff >= firstrightoff);
    1830        9604 :                 if (!_bt_pgaddtup(rightpage, newitemsz, newitem, afterrightoff,
    1831             :                                   afterrightoff == minusinfoff))
    1832             :                 {
    1833           0 :                     elog(ERROR, "failed to add new item to the right sibling"
    1834             :                          " while splitting block %u of index \"%s\"",
    1835             :                          origpagenumber, RelationGetRelationName(rel));
    1836             :                 }
    1837        9604 :                 afterrightoff = OffsetNumberNext(afterrightoff);
    1838             :             }
    1839             :         }
    1840             : 
    1841             :         /* decide which page to put it on */
    1842     6854624 :         if (i < firstrightoff)
    1843             :         {
    1844     5206244 :             if (!_bt_pgaddtup(leftpage, itemsz, dataitem, afterleftoff, false))
    1845             :             {
    1846           0 :                 elog(ERROR, "failed to add old item to the left sibling"
    1847             :                      " while splitting block %u of index \"%s\"",
    1848             :                      origpagenumber, RelationGetRelationName(rel));
    1849             :             }
    1850     5206244 :             afterleftoff = OffsetNumberNext(afterleftoff);
    1851             :         }
    1852             :         else
    1853             :         {
    1854     1648380 :             if (!_bt_pgaddtup(rightpage, itemsz, dataitem, afterrightoff,
    1855             :                               afterrightoff == minusinfoff))
    1856             :             {
    1857           0 :                 elog(ERROR, "failed to add old item to the right sibling"
    1858             :                      " while splitting block %u of index \"%s\"",
    1859             :                      origpagenumber, RelationGetRelationName(rel));
    1860             :             }
    1861     1648380 :             afterrightoff = OffsetNumberNext(afterrightoff);
    1862             :         }
    1863             :     }
    1864             : 
    1865             :     /* Handle case where newitem goes at the end of rightpage */
    1866       22580 :     if (i <= newitemoff)
    1867             :     {
    1868             :         /*
    1869             :          * Can't have newitemonleft here; that would imply we were told to put
    1870             :          * *everything* on the left page, which cannot fit (if it could, we'd
    1871             :          * not be splitting the page).
    1872             :          */
    1873             :         Assert(!newitemonleft && newitemoff == maxoff + 1);
    1874        9598 :         if (!_bt_pgaddtup(rightpage, newitemsz, newitem, afterrightoff,
    1875             :                           afterrightoff == minusinfoff))
    1876             :         {
    1877           0 :             elog(ERROR, "failed to add new item to the right sibling"
    1878             :                  " while splitting block %u of index \"%s\"",
    1879             :                  origpagenumber, RelationGetRelationName(rel));
    1880             :         }
    1881        9598 :         afterrightoff = OffsetNumberNext(afterrightoff);
    1882             :     }
    1883             : 
    1884             :     /*
    1885             :      * We have to grab the original right sibling (if any) and update its prev
    1886             :      * link.  We are guaranteed that this is deadlock-free, since we couple
    1887             :      * the locks in the standard order: left to right.
    1888             :      */
    1889       22580 :     if (!isrightmost)
    1890             :     {
    1891        9714 :         sbuf = _bt_getbuf(rel, oopaque->btpo_next, BT_WRITE);
    1892        9714 :         spage = BufferGetPage(sbuf);
    1893        9714 :         sopaque = BTPageGetOpaque(spage);
    1894        9714 :         if (sopaque->btpo_prev != origpagenumber)
    1895             :         {
    1896           0 :             ereport(ERROR,
    1897             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    1898             :                      errmsg_internal("right sibling's left-link doesn't match: "
    1899             :                                      "block %u links to %u instead of expected %u in index \"%s\"",
    1900             :                                      oopaque->btpo_next, sopaque->btpo_prev, origpagenumber,
    1901             :                                      RelationGetRelationName(rel))));
    1902             :         }
    1903             : 
    1904             :         /*
    1905             :          * Check to see if we can set the SPLIT_END flag in the right-hand
    1906             :          * split page; this can save some I/O for vacuum since it need not
    1907             :          * proceed to the right sibling.  We can set the flag if the right
    1908             :          * sibling has a different cycleid: that means it could not be part of
    1909             :          * a group of pages that were all split off from the same ancestor
    1910             :          * page.  If you're confused, imagine that page A splits to A B and
    1911             :          * then again, yielding A C B, while vacuum is in progress.  Tuples
    1912             :          * originally in A could now be in either B or C, hence vacuum must
    1913             :          * examine both pages.  But if D, our right sibling, has a different
    1914             :          * cycleid then it could not contain any tuples that were in A when
    1915             :          * the vacuum started.
    1916             :          */
    1917        9714 :         if (sopaque->btpo_cycleid != ropaque->btpo_cycleid)
    1918           0 :             ropaque->btpo_flags |= BTP_SPLIT_END;
    1919             :     }
    1920             : 
    1921             :     /*
    1922             :      * Right sibling is locked, new siblings are prepared, but original page
    1923             :      * is not updated yet.
    1924             :      *
    1925             :      * NO EREPORT(ERROR) till right sibling is updated.  We can get away with
    1926             :      * not starting the critical section till here because we haven't been
    1927             :      * scribbling on the original page yet; see comments above.
    1928             :      */
    1929       22580 :     START_CRIT_SECTION();
    1930             : 
    1931             :     /*
    1932             :      * By here, the original data page has been split into two new halves, and
    1933             :      * these are correct.  The algorithm requires that the left page never
    1934             :      * move during a split, so we copy the new left page back on top of the
    1935             :      * original.  We need to do this before writing the WAL record, so that
    1936             :      * XLogInsert can WAL log an image of the page if necessary.
    1937             :      */
    1938       22580 :     memcpy(origpage, leftpage, BLCKSZ);
    1939             :     /* leftpage, lopaque must not be used below here */
    1940             : 
    1941             :     /*
    1942             :      * Move the contents of the right page from its temporary location to the
     1943             :      * destination buffer, before writing the WAL record.  Unlike the left
     1944             :      * page, the right page and its opaque area are still needed to complete
     1945             :      * the update, so re-point rightpage and ropaque at the buffer.
    1946             :      */
    1947       22580 :     rightpage = BufferGetPage(rbuf);
    1948       22580 :     memcpy(rightpage, rightpage_buf.data, BLCKSZ);
    1949       22580 :     ropaque = BTPageGetOpaque(rightpage);
    1950             : 
    1951       22580 :     MarkBufferDirty(buf);
    1952       22580 :     MarkBufferDirty(rbuf);
    1953             : 
    1954       22580 :     if (!isrightmost)
    1955             :     {
    1956        9714 :         sopaque->btpo_prev = rightpagenumber;
    1957        9714 :         MarkBufferDirty(sbuf);
    1958             :     }
    1959             : 
    1960             :     /*
    1961             :      * Clear INCOMPLETE_SPLIT flag on child if inserting the new item finishes
    1962             :      * a split
    1963             :      */
    1964       22580 :     if (!isleaf)
    1965             :     {
    1966         202 :         Page        cpage = BufferGetPage(cbuf);
    1967         202 :         BTPageOpaque cpageop = BTPageGetOpaque(cpage);
    1968             : 
    1969         202 :         cpageop->btpo_flags &= ~BTP_INCOMPLETE_SPLIT;
    1970         202 :         MarkBufferDirty(cbuf);
    1971             :     }
    1972             : 
    1973             :     /* XLOG stuff */
    1974       22580 :     if (RelationNeedsWAL(rel))
    1975             :     {
    1976             :         xl_btree_split xlrec;
    1977             :         uint8       xlinfo;
    1978             :         XLogRecPtr  recptr;
    1979             : 
    1980       21290 :         xlrec.level = ropaque->btpo_level;
    1981             :         /* See comments below on newitem, orignewitem, and posting lists */
    1982       21290 :         xlrec.firstrightoff = firstrightoff;
    1983       21290 :         xlrec.newitemoff = newitemoff;
    1984       21290 :         xlrec.postingoff = 0;
    1985       21290 :         if (postingoff != 0 && origpagepostingoff < firstrightoff)
    1986          38 :             xlrec.postingoff = postingoff;
    1987             : 
    1988       21290 :         XLogBeginInsert();
    1989       21290 :         XLogRegisterData(&xlrec, SizeOfBtreeSplit);
    1990             : 
    1991       21290 :         XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
    1992       21290 :         XLogRegisterBuffer(1, rbuf, REGBUF_WILL_INIT);
    1993             :         /* Log original right sibling, since we've changed its prev-pointer */
    1994       21290 :         if (!isrightmost)
    1995        9702 :             XLogRegisterBuffer(2, sbuf, REGBUF_STANDARD);
    1996       21290 :         if (!isleaf)
    1997         202 :             XLogRegisterBuffer(3, cbuf, REGBUF_STANDARD);
    1998             : 
    1999             :         /*
    2000             :          * Log the new item, if it was inserted on the left page. (If it was
    2001             :          * put on the right page, we don't need to explicitly WAL log it
    2002             :          * because it's included with all the other items on the right page.)
    2003             :          * Show the new item as belonging to the left page buffer, so that it
    2004             :          * is not stored if XLogInsert decides it needs a full-page image of
    2005             :          * the left page.  We always store newitemoff in the record, though.
    2006             :          *
    2007             :          * The details are sometimes slightly different for page splits that
    2008             :          * coincide with a posting list split.  If both the replacement
    2009             :          * posting list and newitem go on the right page, then we don't need
    2010             :          * to log anything extra, just like the simple !newitemonleft
    2011             :          * no-posting-split case (postingoff is set to zero in the WAL record,
    2012             :          * so recovery doesn't need to process a posting list split at all).
    2013             :          * Otherwise, we set postingoff and log orignewitem instead of
    2014             :          * newitem, despite having actually inserted newitem.  REDO routine
    2015             :          * must reconstruct nposting and newitem using _bt_swap_posting().
    2016             :          *
    2017             :          * Note: It's possible that our page split point is the point that
    2018             :          * makes the posting list lastleft and newitem firstright.  This is
    2019             :          * the only case where we log orignewitem/newitem despite newitem
    2020             :          * going on the right page.  If XLogInsert decides that it can omit
    2021             :          * orignewitem due to logging a full-page image of the left page,
     2022             :          * everything still works out, since recovery only needs orignewitem
     2023             :          * in order to redo changes to the left page (just like the regular
     2024             :          * newitem-logged case).
    2025             :          */
    2026       21290 :         if (newitemonleft && xlrec.postingoff == 0)
    2027        3340 :             XLogRegisterBufData(0, newitem, newitemsz);
    2028       17950 :         else if (xlrec.postingoff != 0)
    2029             :         {
    2030             :             Assert(isleaf);
    2031             :             Assert(newitemonleft || firstrightoff == newitemoff);
    2032             :             Assert(newitemsz == IndexTupleSize(orignewitem));
    2033          38 :             XLogRegisterBufData(0, orignewitem, newitemsz);
    2034             :         }
    2035             : 
    2036             :         /* Log the left page's new high key */
    2037       21290 :         if (!isleaf)
    2038             :         {
    2039             :             /* lefthighkey isn't local copy, get current pointer */
    2040         202 :             itemid = PageGetItemId(origpage, P_HIKEY);
    2041         202 :             lefthighkey = (IndexTuple) PageGetItem(origpage, itemid);
    2042             :         }
    2043       21290 :         XLogRegisterBufData(0, lefthighkey,
    2044       21290 :                             MAXALIGN(IndexTupleSize(lefthighkey)));
    2045             : 
    2046             :         /*
    2047             :          * Log the contents of the right page in the format understood by
    2048             :          * _bt_restore_page().  The whole right page will be recreated.
    2049             :          *
     2050             :          * Direct access to the page is not ideal, but faster; we should add
     2051             :          * a new function to the page API.  Note we only store the tuples
    2052             :          * themselves, knowing that they were inserted in item-number order
    2053             :          * and so the line pointers can be reconstructed.  See comments for
    2054             :          * _bt_restore_page().
    2055             :          */
    2056       21290 :         XLogRegisterBufData(1,
    2057       21290 :                             (char *) rightpage + ((PageHeader) rightpage)->pd_upper,
    2058       21290 :                             ((PageHeader) rightpage)->pd_special - ((PageHeader) rightpage)->pd_upper);
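                      : 
                      :         /*
                      :          * For illustration only (a simplified sketch of the registered
                      :          * payload):
                      :          *
                      :          *   [page header | line pointers | free | tuples | special]
                      :          *                                        ^pd_upper ^pd_special
                      :          *
                      :          * Replay rebuilds the line pointer array from this packed tuple
                      :          * data alone; see _bt_restore_page().
                      :          */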
    2059             : 
    2060       21290 :         xlinfo = newitemonleft ? XLOG_BTREE_SPLIT_L : XLOG_BTREE_SPLIT_R;
    2061       21290 :         recptr = XLogInsert(RM_BTREE_ID, xlinfo);
    2062             : 
    2063       21290 :         PageSetLSN(origpage, recptr);
    2064       21290 :         PageSetLSN(rightpage, recptr);
    2065       21290 :         if (!isrightmost)
    2066        9702 :             PageSetLSN(spage, recptr);
    2067       21290 :         if (!isleaf)
    2068         202 :             PageSetLSN(BufferGetPage(cbuf), recptr);
    2069             :     }
    2070             : 
    2071       22580 :     END_CRIT_SECTION();
    2072             : 
    2073             :     /* release the old right sibling */
    2074       22580 :     if (!isrightmost)
    2075        9714 :         _bt_relbuf(rel, sbuf);
    2076             : 
    2077             :     /* release the child */
    2078       22580 :     if (!isleaf)
    2079         202 :         _bt_relbuf(rel, cbuf);
    2080             : 
    2081             :     /* be tidy */
    2082       22580 :     if (isleaf)
    2083       22378 :         pfree(lefthighkey);
    2084             : 
    2085             :     /* split's done */
    2086       22580 :     return rbuf;
    2087             : }
    2088             : 
    2089             : /*
    2090             :  * _bt_insert_parent() -- Insert downlink into parent, completing split.
    2091             :  *
    2092             :  * On entry, buf and rbuf are the left and right split pages, which we
    2093             :  * still hold write locks on.  Both locks will be released here.  We
     2094             :  * release the rbuf lock once we have a write lock on the page on which we
     2095             :  * intend to insert the downlink for rbuf (i.e. buf's current parent page).
    2096             :  * The lock on buf is released at the same point as the lock on the parent
    2097             :  * page, since buf's INCOMPLETE_SPLIT flag must be cleared by the same
    2098             :  * atomic operation that completes the split by inserting a new downlink.
    2099             :  *
     2100             :  * stack - stack showing how we got here.  Will be NULL when splitting the true
     2101             :  *          root, or during a concurrent root split, where inefficiency is OK
    2102             :  * isroot - we split the true root
    2103             :  * isonly - we split a page alone on its level (might have been fast root)
    2104             :  */
    2105             : static void
    2106       22580 : _bt_insert_parent(Relation rel,
    2107             :                   Relation heaprel,
    2108             :                   Buffer buf,
    2109             :                   Buffer rbuf,
    2110             :                   BTStack stack,
    2111             :                   bool isroot,
    2112             :                   bool isonly)
    2113             : {
    2114             :     Assert(heaprel != NULL);
    2115             : 
    2116             :     /*
    2117             :      * Here we have to do something Lehman and Yao don't talk about: deal with
    2118             :      * a root split and construction of a new root.  If our stack is empty
    2119             :      * then we have just split a node on what had been the root level when we
    2120             :      * descended the tree.  If it was still the root then we perform a
    2121             :      * new-root construction.  If it *wasn't* the root anymore, search to find
    2122             :      * the next higher level that someone constructed meanwhile, and find the
    2123             :      * right place to insert as for the normal case.
    2124             :      *
    2125             :      * If we have to search for the parent level, we do so by re-descending
    2126             :      * from the root.  This is not super-efficient, but it's rare enough not
    2127             :      * to matter.
    2128             :      */
    2129       22580 :     if (isroot)
    2130             :     {
    2131             :         Buffer      rootbuf;
    2132             : 
    2133             :         Assert(stack == NULL);
    2134             :         Assert(isonly);
    2135             :         /* create a new root node one level up and update the metapage */
    2136        1364 :         rootbuf = _bt_newlevel(rel, heaprel, buf, rbuf);
    2137             :         /* release the split buffers */
    2138        1364 :         _bt_relbuf(rel, rootbuf);
    2139        1364 :         _bt_relbuf(rel, rbuf);
    2140        1364 :         _bt_relbuf(rel, buf);
    2141             :     }
    2142             :     else
    2143             :     {
    2144       21216 :         BlockNumber bknum = BufferGetBlockNumber(buf);
    2145       21216 :         BlockNumber rbknum = BufferGetBlockNumber(rbuf);
    2146       21216 :         Page        page = BufferGetPage(buf);
    2147             :         IndexTuple  new_item;
    2148             :         BTStackData fakestack;
    2149             :         IndexTuple  ritem;
    2150             :         Buffer      pbuf;
    2151             : 
    2152       21216 :         if (stack == NULL)
    2153             :         {
    2154             :             BTPageOpaque opaque;
    2155             : 
    2156          22 :             elog(DEBUG2, "concurrent ROOT page split");
    2157          22 :             opaque = BTPageGetOpaque(page);
    2158             : 
    2159             :             /*
     2160             :              * We should never reach here via a leaf page split whose insert
     2161             :              * of newitem applied the fastpath optimization.  Make sure of
     2162             :              * that with an assertion.
    2163             :              *
    2164             :              * This is more of a performance issue than a correctness issue.
    2165             :              * The fastpath won't have a descent stack.  Using a phony stack
    2166             :              * here works, but never rely on that.  The fastpath should be
    2167             :              * rejected within _bt_search_insert() when the rightmost leaf
    2168             :              * page will split, since it's faster to go through _bt_search()
    2169             :              * and get a stack in the usual way.
    2170             :              */
    2171             :             Assert(!(P_ISLEAF(opaque) &&
    2172             :                      BlockNumberIsValid(RelationGetTargetBlock(rel))));
    2173             : 
    2174             :             /* Find the leftmost page at the next level up */
    2175          22 :             pbuf = _bt_get_endpoint(rel, opaque->btpo_level + 1, false);
    2176             :             /* Set up a phony stack entry pointing there */
    2177          22 :             stack = &fakestack;
    2178          22 :             stack->bts_blkno = BufferGetBlockNumber(pbuf);
    2179          22 :             stack->bts_offset = InvalidOffsetNumber;
    2180          22 :             stack->bts_parent = NULL;
    2181          22 :             _bt_relbuf(rel, pbuf);
    2182             :         }
    2183             : 
    2184             :         /* get high key from left, a strict lower bound for new right page */
    2185       21216 :         ritem = (IndexTuple) PageGetItem(page,
    2186       21216 :                                          PageGetItemId(page, P_HIKEY));
    2187             : 
    2188             :         /* form an index tuple that points at the new right page */
    2189       21216 :         new_item = CopyIndexTuple(ritem);
    2190       21216 :         BTreeTupleSetDownLink(new_item, rbknum);
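                      : 
                      :         /*
                      :          * Schematically, the new pivot tuple is (key = left page's high
                      :          * key, downlink = rbknum), which preserves the "seam" of
                      :          * identical separator keys described in _bt_split().
                      :          */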
    2191             : 
    2192             :         /*
    2193             :          * Re-find and write lock the parent of buf.
    2194             :          *
    2195             :          * It's possible that the location of buf's downlink has changed since
    2196             :          * our initial _bt_search() descent.  _bt_getstackbuf() will detect
    2197             :          * and recover from this, updating the stack, which ensures that the
    2198             :          * new downlink will be inserted at the correct offset. Even buf's
    2199             :          * parent may have changed.
    2200             :          */
    2201       21216 :         pbuf = _bt_getstackbuf(rel, heaprel, stack, bknum);
    2202             : 
    2203             :         /*
    2204             :          * Unlock the right child.  The left child will be unlocked in
    2205             :          * _bt_insertonpg().
    2206             :          *
    2207             :          * Unlocking the right child must be delayed until here to ensure that
    2208             :          * no concurrent VACUUM operation can become confused.  Page deletion
    2209             :          * cannot be allowed to fail to re-find a downlink for the rbuf page.
    2210             :          * (Actually, this is just a vestige of how things used to work.  The
    2211             :          * page deletion code is expected to check for the INCOMPLETE_SPLIT
    2212             :          * flag on the left child.  It won't attempt deletion of the right
    2213             :          * child until the split is complete.  Despite all this, we opt to
    2214             :          * conservatively delay unlocking the right child until here.)
    2215             :          */
    2216       21216 :         _bt_relbuf(rel, rbuf);
    2217             : 
    2218       21216 :         if (pbuf == InvalidBuffer)
    2219           0 :             ereport(ERROR,
    2220             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    2221             :                      errmsg_internal("failed to re-find parent key in index \"%s\" for split pages %u/%u",
    2222             :                                      RelationGetRelationName(rel), bknum, rbknum)));
    2223             : 
    2224             :         /* Recursively insert into the parent */
    2225       42432 :         _bt_insertonpg(rel, heaprel, NULL, pbuf, buf, stack->bts_parent,
    2226       21216 :                        new_item, MAXALIGN(IndexTupleSize(new_item)),
    2227       21216 :                        stack->bts_offset + 1, 0, isonly);
    2228             : 
    2229             :         /* be tidy */
    2230       21216 :         pfree(new_item);
    2231             :     }
    2232       22580 : }
    2233             : 
    2234             : /*
    2235             :  * _bt_finish_split() -- Finish an incomplete split
    2236             :  *
    2237             :  * A crash or other failure can leave a split incomplete.  The insertion
     2238             :  * routines won't allow insertion on a page that is incompletely split.
    2239             :  * Before inserting on such a page, call _bt_finish_split().
    2240             :  *
    2241             :  * On entry, 'lbuf' must be locked in write-mode.  On exit, it is unlocked
    2242             :  * and unpinned.
    2243             :  *
    2244             :  * Caller must provide a valid heaprel, since finishing a page split requires
    2245             :  * allocating a new page if and when the parent page splits in turn.
    2246             :  */
    2247             : void
    2248           0 : _bt_finish_split(Relation rel, Relation heaprel, Buffer lbuf, BTStack stack)
    2249             : {
    2250           0 :     Page        lpage = BufferGetPage(lbuf);
    2251           0 :     BTPageOpaque lpageop = BTPageGetOpaque(lpage);
    2252             :     Buffer      rbuf;
    2253             :     Page        rpage;
    2254             :     BTPageOpaque rpageop;
    2255             :     bool        wasroot;
    2256             :     bool        wasonly;
    2257             : 
    2258             :     Assert(P_INCOMPLETE_SPLIT(lpageop));
    2259             :     Assert(heaprel != NULL);
    2260             : 
    2261             :     /* Lock right sibling, the one missing the downlink */
    2262           0 :     rbuf = _bt_getbuf(rel, lpageop->btpo_next, BT_WRITE);
    2263           0 :     rpage = BufferGetPage(rbuf);
    2264           0 :     rpageop = BTPageGetOpaque(rpage);
    2265             : 
    2266             :     /* Could this be a root split? */
    2267           0 :     if (!stack)
    2268             :     {
    2269             :         Buffer      metabuf;
    2270             :         Page        metapg;
    2271             :         BTMetaPageData *metad;
    2272             : 
    2273             :         /* acquire lock on the metapage */
    2274           0 :         metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
    2275           0 :         metapg = BufferGetPage(metabuf);
    2276           0 :         metad = BTPageGetMeta(metapg);
    2277             : 
    2278           0 :         wasroot = (metad->btm_root == BufferGetBlockNumber(lbuf));
    2279             : 
    2280           0 :         _bt_relbuf(rel, metabuf);
    2281             :     }
    2282             :     else
    2283           0 :         wasroot = false;
    2284             : 
    2285             :     /* Was this the only page on the level before split? */
    2286           0 :     wasonly = (P_LEFTMOST(lpageop) && P_RIGHTMOST(rpageop));
    2287             : 
    2288           0 :     elog(DEBUG1, "finishing incomplete split of %u/%u",
    2289             :          BufferGetBlockNumber(lbuf), BufferGetBlockNumber(rbuf));
    2290             : 
    2291           0 :     _bt_insert_parent(rel, heaprel, lbuf, rbuf, stack, wasroot, wasonly);
    2292           0 : }
    2293             : 
    2294             : /*
    2295             :  *  _bt_getstackbuf() -- Walk back up the tree one step, and find the pivot
    2296             :  *                       tuple whose downlink points to child page.
    2297             :  *
    2298             :  *      Caller passes child's block number, which is used to identify
    2299             :  *      associated pivot tuple in parent page using a linear search that
    2300             :  *      matches on pivot's downlink/block number.  The expected location of
    2301             :  *      the pivot tuple is taken from the stack one level above the child
    2302             :  *      page.  This is used as a starting point.  Insertions into the
    2303             :  *      parent level could cause the pivot tuple to move right; deletions
    2304             :  *      could cause it to move left, but not left of the page we previously
    2305             :  *      found it on.
    2306             :  *
    2307             :  *      Caller can use its stack to relocate the pivot tuple/downlink for
    2308             :  *      any same-level page to the right of the page found by its initial
    2309             :  *      descent.  This is necessary because of the possibility that caller
    2310             :  *      moved right to recover from a concurrent page split.  It's also
    2311             :  *      convenient for certain callers to be able to step right when there
    2312             :  *      wasn't a concurrent page split, while still using their original
    2313             :  *      stack.  For example, the checkingunique _bt_doinsert() case may
    2314             :  *      have to step right when there are many physical duplicates, and its
    2315             :  *      scantid forces an insertion to the right of the "first page the
    2316             :  *      value could be on".  (This is also relied on by all of our callers
    2317             :  *      when dealing with !heapkeyspace indexes.)
    2318             :  *
    2319             :  *      Returns write-locked parent page buffer, or InvalidBuffer if pivot
    2320             :  *      tuple not found (should not happen).  Adjusts bts_blkno &
    2321             :  *      bts_offset if changed.  Page split caller should insert its new
    2322             :  *      pivot tuple for its new right sibling page on parent page, at the
    2323             :  *      offset number bts_offset + 1.
    2324             :  */
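                      : /*
                      :  * A hypothetical illustration of the search order used below: with
                      :  * start = 5 on a parent page holding offsets 1..8, we probe 5, 6, 7, 8
                      :  * and then 4, 3, 2, 1, since insertions into the parent tend to move the
                      :  * downlink right rather than left.
                      :  */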
    2325             : Buffer
    2326       27008 : _bt_getstackbuf(Relation rel, Relation heaprel, BTStack stack, BlockNumber child)
    2327             : {
    2328             :     BlockNumber blkno;
    2329             :     OffsetNumber start;
    2330             : 
    2331       27008 :     blkno = stack->bts_blkno;
    2332       27008 :     start = stack->bts_offset;
    2333             : 
    2334             :     for (;;)
    2335           0 :     {
    2336             :         Buffer      buf;
    2337             :         Page        page;
    2338             :         BTPageOpaque opaque;
    2339             : 
    2340       27008 :         buf = _bt_getbuf(rel, blkno, BT_WRITE);
    2341       27008 :         page = BufferGetPage(buf);
    2342       27008 :         opaque = BTPageGetOpaque(page);
    2343             : 
    2344             :         Assert(heaprel != NULL);
    2345       27008 :         if (P_INCOMPLETE_SPLIT(opaque))
    2346             :         {
    2347           0 :             _bt_finish_split(rel, heaprel, buf, stack->bts_parent);
    2348           0 :             continue;
    2349             :         }
    2350             : 
    2351       27008 :         if (!P_IGNORE(opaque))
    2352             :         {
    2353             :             OffsetNumber offnum,
    2354             :                         minoff,
    2355             :                         maxoff;
    2356             :             ItemId      itemid;
    2357             :             IndexTuple  item;
    2358             : 
    2359       27008 :             minoff = P_FIRSTDATAKEY(opaque);
    2360       27008 :             maxoff = PageGetMaxOffsetNumber(page);
    2361             : 
    2362             :             /*
     2363             :              * start = InvalidOffsetNumber means "search the whole page".
     2364             :              * We need this test anyway due to the possibility that the page
     2365             :              * has a high key now when it didn't have one before.
    2366             :              */
    2367       27008 :             if (start < minoff)
    2368          22 :                 start = minoff;
    2369             : 
    2370             :             /*
     2371             :              * Need this check too, to guard against the possibility that
     2372             :              * the page has split since we visited it originally.
    2373             :              */
    2374       27008 :             if (start > maxoff)
    2375           0 :                 start = OffsetNumberNext(maxoff);
    2376             : 
    2377             :             /*
    2378             :              * These loops will check every item on the page --- but in an
    2379             :              * order that's attuned to the probability of where it actually
    2380             :              * is.  Scan to the right first, then to the left.
    2381             :              */
    2382       27008 :             for (offnum = start;
    2383       27100 :                  offnum <= maxoff;
    2384          92 :                  offnum = OffsetNumberNext(offnum))
    2385             :             {
    2386       27100 :                 itemid = PageGetItemId(page, offnum);
    2387       27100 :                 item = (IndexTuple) PageGetItem(page, itemid);
    2388             : 
    2389       27100 :                 if (BTreeTupleGetDownLink(item) == child)
    2390             :                 {
    2391             :                     /* Return accurate pointer to where link is now */
    2392       27008 :                     stack->bts_blkno = blkno;
    2393       27008 :                     stack->bts_offset = offnum;
    2394       27008 :                     return buf;
    2395             :                 }
    2396             :             }
    2397             : 
    2398           0 :             for (offnum = OffsetNumberPrev(start);
    2399           0 :                  offnum >= minoff;
    2400           0 :                  offnum = OffsetNumberPrev(offnum))
    2401             :             {
    2402           0 :                 itemid = PageGetItemId(page, offnum);
    2403           0 :                 item = (IndexTuple) PageGetItem(page, itemid);
    2404             : 
    2405           0 :                 if (BTreeTupleGetDownLink(item) == child)
    2406             :                 {
    2407             :                     /* Return accurate pointer to where link is now */
    2408           0 :                     stack->bts_blkno = blkno;
    2409           0 :                     stack->bts_offset = offnum;
    2410           0 :                     return buf;
    2411             :                 }
    2412             :             }
    2413             :         }
    2414             : 
    2415             :         /*
    2416             :          * The item we're looking for moved right at least one page.
    2417             :          *
    2418             :          * Lehman and Yao couple/chain locks when moving right here, which we
    2419             :          * can avoid.  See nbtree/README.
    2420             :          */
    2421           0 :         if (P_RIGHTMOST(opaque))
    2422             :         {
    2423           0 :             _bt_relbuf(rel, buf);
    2424           0 :             return InvalidBuffer;
    2425             :         }
    2426           0 :         blkno = opaque->btpo_next;
    2427           0 :         start = InvalidOffsetNumber;
    2428           0 :         _bt_relbuf(rel, buf);
    2429             :     }
    2430             : }
    2431             : 
    2432             : /*
     2433             :  *  _bt_newlevel() -- Create a new level above the root page.
    2434             :  *
    2435             :  *      We've just split the old root page and need to create a new one.
    2436             :  *      In order to do this, we add a new root page to the file, then lock
    2437             :  *      the metadata page and update it.  This is guaranteed to be deadlock-
    2438             :  *      free, because all readers release their locks on the metadata page
    2439             :  *      before trying to lock the root, and all writers lock the root before
    2440             :  *      trying to lock the metadata page.  We have a write lock on the old
    2441             :  *      root page, so we have not introduced any cycles into the waits-for
    2442             :  *      graph.
    2443             :  *
    2444             :  *      On entry, lbuf (the old root) and rbuf (its new peer) are write-
    2445             :  *      locked. On exit, a new root page exists with entries for the
    2446             :  *      two new children, metapage is updated and unlocked/unpinned.
     2447             :  *      The new root buffer is returned to the caller, which must unlock/unpin
     2448             :  *      lbuf, rbuf & rootbuf.
    2449             :  */
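                      : /*
                      :  * Schematically (block numbers hypothetical), with the old root at
                      :  * block 1 and its new peer at block 2:
                      :  *
                      :  *        new root: [ (-inf, downlink 1) (lhikey, downlink 2) ]
                      :  *                     /                      \
                      :  *            block 1 (old root)        block 2 (new peer)
                      :  *
                      :  * The left downlink carries the "minus infinity" sentinel key; the
                      :  * right downlink reuses the old root's high key (lhikey).
                      :  */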
    2450             : static Buffer
    2451        1364 : _bt_newlevel(Relation rel, Relation heaprel, Buffer lbuf, Buffer rbuf)
    2452             : {
    2453             :     Buffer      rootbuf;
    2454             :     Page        lpage,
    2455             :                 rootpage;
    2456             :     BlockNumber lbkno,
    2457             :                 rbkno;
    2458             :     BlockNumber rootblknum;
    2459             :     BTPageOpaque rootopaque;
    2460             :     BTPageOpaque lopaque;
    2461             :     ItemId      itemid;
    2462             :     IndexTuple  item;
    2463             :     IndexTuple  left_item;
    2464             :     Size        left_item_sz;
    2465             :     IndexTuple  right_item;
    2466             :     Size        right_item_sz;
    2467             :     Buffer      metabuf;
    2468             :     Page        metapg;
    2469             :     BTMetaPageData *metad;
    2470             : 
    2471        1364 :     lbkno = BufferGetBlockNumber(lbuf);
    2472        1364 :     rbkno = BufferGetBlockNumber(rbuf);
    2473        1364 :     lpage = BufferGetPage(lbuf);
    2474        1364 :     lopaque = BTPageGetOpaque(lpage);
    2475             : 
    2476             :     /* get a new root page */
    2477        1364 :     rootbuf = _bt_allocbuf(rel, heaprel);
    2478        1364 :     rootpage = BufferGetPage(rootbuf);
    2479        1364 :     rootblknum = BufferGetBlockNumber(rootbuf);
    2480             : 
    2481             :     /* acquire lock on the metapage */
    2482        1364 :     metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
    2483        1364 :     metapg = BufferGetPage(metabuf);
    2484        1364 :     metad = BTPageGetMeta(metapg);
    2485             : 
    2486             :     /*
    2487             :      * Create downlink item for left page (old root).  The key value used is
    2488             :      * "minus infinity", a sentinel value that's reliably less than any real
    2489             :      * key value that could appear in the left page.
    2490             :      */
    2491        1364 :     left_item_sz = sizeof(IndexTupleData);
    2492        1364 :     left_item = (IndexTuple) palloc(left_item_sz);
    2493        1364 :     left_item->t_info = left_item_sz;
    2494        1364 :     BTreeTupleSetDownLink(left_item, lbkno);
    2495        1364 :     BTreeTupleSetNAtts(left_item, 0, false);
    2496             : 
    2497             :     /*
    2498             :      * Create downlink item for right page.  The key for it is obtained from
    2499             :      * the "high key" position in the left page.
    2500             :      */
    2501        1364 :     itemid = PageGetItemId(lpage, P_HIKEY);
    2502        1364 :     right_item_sz = ItemIdGetLength(itemid);
    2503        1364 :     item = (IndexTuple) PageGetItem(lpage, itemid);
    2504        1364 :     right_item = CopyIndexTuple(item);
    2505        1364 :     BTreeTupleSetDownLink(right_item, rbkno);
    2506             : 
    2507             :     /* NO EREPORT(ERROR) from here till newroot op is logged */
    2508        1364 :     START_CRIT_SECTION();
    2509             : 
    2510             :     /* upgrade metapage if needed */
    2511        1364 :     if (metad->btm_version < BTREE_NOVAC_VERSION)
    2512           0 :         _bt_upgrademetapage(metapg);
    2513             : 
    2514             :     /* set btree special data */
    2515        1364 :     rootopaque = BTPageGetOpaque(rootpage);
    2516        1364 :     rootopaque->btpo_prev = rootopaque->btpo_next = P_NONE;
    2517        1364 :     rootopaque->btpo_flags = BTP_ROOT;
    2518        1364 :     rootopaque->btpo_level =
    2519        1364 :         (BTPageGetOpaque(lpage))->btpo_level + 1;
    2520        1364 :     rootopaque->btpo_cycleid = 0;
    2521             : 
    2522             :     /* update metapage data */
    2523        1364 :     metad->btm_root = rootblknum;
    2524        1364 :     metad->btm_level = rootopaque->btpo_level;
    2525        1364 :     metad->btm_fastroot = rootblknum;
    2526        1364 :     metad->btm_fastlevel = rootopaque->btpo_level;
    2527             : 
    2528             :     /*
    2529             :      * Insert the left page pointer into the new root page.  The root page is
    2530             :      * the rightmost page on its level so there is no "high key" in it; the
    2531             :      * two items will go into positions P_HIKEY and P_FIRSTKEY.
    2532             :      *
    2533             :      * Note: we *must* insert the two items in item-number order, for the
    2534             :      * benefit of _bt_restore_page().
    2535             :      */
    2536             :     Assert(BTreeTupleGetNAtts(left_item, rel) == 0);
    2537        1364 :     if (PageAddItem(rootpage, left_item, left_item_sz, P_HIKEY, false, false) == InvalidOffsetNumber)
    2538           0 :         elog(PANIC, "failed to add leftkey to new root page"
    2539             :              " while splitting block %u of index \"%s\"",
    2540             :              BufferGetBlockNumber(lbuf), RelationGetRelationName(rel));
    2541             : 
    2542             :     /*
    2543             :      * insert the right page pointer into the new root page.
    2544             :      */
    2545             :     Assert(BTreeTupleGetNAtts(right_item, rel) > 0);
    2546             :     Assert(BTreeTupleGetNAtts(right_item, rel) <=
    2547             :            IndexRelationGetNumberOfKeyAttributes(rel));
    2548        1364 :     if (PageAddItem(rootpage, right_item, right_item_sz, P_FIRSTKEY, false, false) == InvalidOffsetNumber)
    2549           0 :         elog(PANIC, "failed to add rightkey to new root page"
    2550             :              " while splitting block %u of index \"%s\"",
    2551             :              BufferGetBlockNumber(lbuf), RelationGetRelationName(rel));
    2552             : 
    2553             :     /* Clear the incomplete-split flag in the left child */
    2554             :     Assert(P_INCOMPLETE_SPLIT(lopaque));
    2555        1364 :     lopaque->btpo_flags &= ~BTP_INCOMPLETE_SPLIT;
    2556        1364 :     MarkBufferDirty(lbuf);
    2557             : 
    2558        1364 :     MarkBufferDirty(rootbuf);
    2559        1364 :     MarkBufferDirty(metabuf);
    2560             : 
    2561             :     /* XLOG stuff */
    2562        1364 :     if (RelationNeedsWAL(rel))
    2563             :     {
    2564             :         xl_btree_newroot xlrec;
    2565             :         XLogRecPtr  recptr;
    2566             :         xl_btree_metadata md;
    2567             : 
    2568        1328 :         xlrec.rootblk = rootblknum;
    2569        1328 :         xlrec.level = metad->btm_level;
    2570             : 
    2571        1328 :         XLogBeginInsert();
    2572        1328 :         XLogRegisterData(&xlrec, SizeOfBtreeNewroot);
    2573             : 
    2574        1328 :         XLogRegisterBuffer(0, rootbuf, REGBUF_WILL_INIT);
    2575        1328 :         XLogRegisterBuffer(1, lbuf, REGBUF_STANDARD);
    2576        1328 :         XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
    2577             : 
    2578             :         Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
    2579        1328 :         md.version = metad->btm_version;
    2580        1328 :         md.root = rootblknum;
    2581        1328 :         md.level = metad->btm_level;
    2582        1328 :         md.fastroot = rootblknum;
    2583        1328 :         md.fastlevel = metad->btm_level;
    2584        1328 :         md.last_cleanup_num_delpages = metad->btm_last_cleanup_num_delpages;
    2585        1328 :         md.allequalimage = metad->btm_allequalimage;
    2586             : 
    2587        1328 :         XLogRegisterBufData(2, &md, sizeof(xl_btree_metadata));
    2588             : 
    2589             :         /*
     2590             :          * Direct access to the page is not ideal, but faster; we should add
     2591             :          * a new function to the page API.
    2592             :          */
    2593        1328 :         XLogRegisterBufData(0,
    2594        1328 :                             (char *) rootpage + ((PageHeader) rootpage)->pd_upper,
    2595        1328 :                             ((PageHeader) rootpage)->pd_special -
    2596        1328 :                             ((PageHeader) rootpage)->pd_upper);
    2597             : 
    2598        1328 :         recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT);
    2599             : 
    2600        1328 :         PageSetLSN(lpage, recptr);
    2601        1328 :         PageSetLSN(rootpage, recptr);
    2602        1328 :         PageSetLSN(metapg, recptr);
    2603             :     }
    2604             : 
    2605        1364 :     END_CRIT_SECTION();
    2606             : 
    2607             :     /* done with metapage */
    2608        1364 :     _bt_relbuf(rel, metabuf);
    2609             : 
    2610        1364 :     pfree(left_item);
    2611        1364 :     pfree(right_item);
    2612             : 
    2613        1364 :     return rootbuf;
    2614             : }
    2615             : 
    2616             : /*
    2617             :  *  _bt_pgaddtup() -- add a data item to a particular page during split.
    2618             :  *
    2619             :  *      The difference between this routine and a bare PageAddItem call is
    2620             :  *      that this code can deal with the first data item on an internal btree
    2621             :  *      page in passing.  This data item (which is called "firstright" within
    2622             :  *      _bt_split()) has a key that must be treated as minus infinity after
    2623             :  *      the split.  Therefore, we truncate away all attributes when caller
    2624             :  *      specifies it's the first data item on page (downlink is not changed,
    2625             :  *      though).  This extra step is only needed for the right page of an
    2626             :  *      internal page split.  There is no need to do this for the first data
    2627             :  *      item on the existing/left page, since that will already have been
    2628             :  *      truncated during an earlier page split.
    2629             :  *
    2630             :  *      See _bt_split() for a high level explanation of why we truncate here.
    2631             :  *      Note that this routine has nothing to do with suffix truncation,
    2632             :  *      despite using some of the same infrastructure.
    2633             :  */
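                      : /*
                      :  * A minimal sketch of what truncation to "minus infinity" amounts to
                      :  * here (these are the assignments made below): keep only the
                      :  * IndexTupleData header, so t_info becomes sizeof(IndexTupleData) and
                      :  * the number of key attributes becomes zero, while the downlink stored
                      :  * in the header is carried over intact.
                      :  */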
    2634             : static inline bool
    2635     6877204 : _bt_pgaddtup(Page page,
    2636             :              Size itemsize,
    2637             :              const IndexTupleData *itup,
    2638             :              OffsetNumber itup_off,
    2639             :              bool newfirstdataitem)
    2640             : {
    2641             :     IndexTupleData trunctuple;
    2642             : 
    2643     6877204 :     if (newfirstdataitem)
    2644             :     {
    2645         202 :         trunctuple = *itup;
    2646         202 :         trunctuple.t_info = sizeof(IndexTupleData);
    2647         202 :         BTreeTupleSetNAtts(&trunctuple, 0, false);
    2648         202 :         itup = &trunctuple;
    2649         202 :         itemsize = sizeof(IndexTupleData);
    2650             :     }
    2651             : 
    2652     6877204 :     if (unlikely(PageAddItem(page, itup, itemsize, itup_off, false, false) == InvalidOffsetNumber))
    2653           0 :         return false;
    2654             : 
    2655     6877204 :     return true;
    2656             : }
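
To make the "minus infinity" treatment concrete: only the IndexTupleData header survives (its t_tid carries the downlink on an internal page), and the attribute count is set to zero so the item sorts before every real key. The standalone sketch below mimics the size-shrinking step with a mocked-up tuple; MockIndexTuple and its fields are illustrative assumptions, not the real itup.h layout.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-in for IndexTupleData: header followed by key bytes. */
    typedef struct MockIndexTuple
    {
        uint32_t    t_tid_block;    /* downlink block number (kept) */
        uint16_t    t_info;         /* tuple size, simplified */
        char        key[16];        /* key attributes (truncated away) */
    } MockIndexTuple;

    int
    main(void)
    {
        MockIndexTuple itup = {42, (uint16_t) sizeof(MockIndexTuple), "alpha"};
        MockIndexTuple trunctuple;

        /* same pattern as above: copy the header, shrink the reported size
         * (the real code also zeroes the attribute count via
         * BTreeTupleSetNAtts) */
        trunctuple = itup;
        trunctuple.t_info = (uint16_t) offsetof(MockIndexTuple, key);

        printf("full item: %u bytes; minus-infinity item: %u bytes (downlink %u)\n",
               (unsigned) itup.t_info, (unsigned) trunctuple.t_info,
               (unsigned) trunctuple.t_tid_block);
        return 0;
    }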
    2657             : 
    2658             : /*
    2659             :  * _bt_delete_or_dedup_one_page - Try to avoid a leaf page split.
    2660             :  *
    2661             :  * There are three operations performed here: simple index deletion, bottom-up
    2662             :  * index deletion, and deduplication.  If all three operations fail to free
    2663             :  * enough space for the incoming item then caller will go on to split the
    2664             :  * page.  We always consider simple deletion first.  If that doesn't work out
    2665             :  * we consider alternatives.  Callers that only want us to consider simple
    2666             :  * deletion (without any fallback) ask for that using the 'simpleonly'
    2667             :  * argument.
    2668             :  *
    2669             :  * We usually pick only one alternative "complex" operation when simple
    2670             :  * deletion alone won't prevent a page split.  The 'checkingunique',
    2671             :  * 'uniquedup', and 'indexUnchanged' arguments are used for that.
    2672             :  *
    2673             :  * Note: We used to only delete LP_DEAD items when the BTP_HAS_GARBAGE page
    2674             :  * level flag was found set.  The flag was useful back when there wasn't
    2675             :  * necessarily one single page for a duplicate tuple to go on (before heap TID
    2676             :  * became a part of the key space in version 4 indexes).  But we don't
    2677             :  * actually look at the flag anymore (it's not a gating condition for our
    2678             :  * caller).  Gating on the flag would cause us to miss tuples that are safe
    2679             :  * to delete, without getting any benefit in return.  We know that the
    2680             :  * alternative is to split the page; scanning the line pointer array in
    2681             :  * passing won't add noticeable overhead.  (We still maintain the
    2682             :  * BTP_HAS_GARBAGE flag despite all this because !heapkeyspace indexes must
    2683             :  * still do a "getting tired" linear search, and so are likely to get some
    2684             :  * benefit from using it as a gating condition.)
    2685             :  */
    2686             : static void
    2687       51290 : _bt_delete_or_dedup_one_page(Relation rel, Relation heapRel,
    2688             :                              BTInsertState insertstate,
    2689             :                              bool simpleonly, bool checkingunique,
    2690             :                              bool uniquedup, bool indexUnchanged)
    2691             : {
    2692             :     OffsetNumber deletable[MaxIndexTuplesPerPage];
    2693       51290 :     int         ndeletable = 0;
    2694             :     OffsetNumber offnum,
    2695             :                 minoff,
    2696             :                 maxoff;
    2697       51290 :     Buffer      buffer = insertstate->buf;
    2698       51290 :     BTScanInsert itup_key = insertstate->itup_key;
    2699       51290 :     Page        page = BufferGetPage(buffer);
    2700       51290 :     BTPageOpaque opaque = BTPageGetOpaque(page);
    2701             : 
    2702             :     Assert(P_ISLEAF(opaque));
    2703             :     Assert(simpleonly || itup_key->heapkeyspace);
    2704             :     Assert(!simpleonly || (!checkingunique && !uniquedup && !indexUnchanged));
    2705             : 
    2706             :     /*
    2707             :      * Scan over all items to see which ones need to be deleted according to
    2708             :      * LP_DEAD flags.  We'll usually manage to delete a few extra items that
    2709             :      * are not marked LP_DEAD in passing.  Often the extra items that actually
    2710             :      * end up getting deleted are items that would have had their LP_DEAD bit
    2711             :      * set before long anyway (had we opted not to include them as extras).
    2712             :      */
    2713       51290 :     minoff = P_FIRSTDATAKEY(opaque);
    2714       51290 :     maxoff = PageGetMaxOffsetNumber(page);
    2715       51290 :     for (offnum = minoff;
    2716    13741110 :          offnum <= maxoff;
    2717    13689820 :          offnum = OffsetNumberNext(offnum))
    2718             :     {
    2719    13689820 :         ItemId      itemId = PageGetItemId(page, offnum);
    2720             : 
    2721    13689820 :         if (ItemIdIsDead(itemId))
    2722      255056 :             deletable[ndeletable++] = offnum;
    2723             :     }
    2724             : 
    2725       51290 :     if (ndeletable > 0)
    2726             :     {
    2727        7718 :         _bt_simpledel_pass(rel, buffer, heapRel, deletable, ndeletable,
    2728             :                            insertstate->itup, minoff, maxoff);
    2729        7718 :         insertstate->bounds_valid = false;
    2730             : 
    2731             :         /* Return when a page split has already been avoided */
    2732        7718 :         if (PageGetFreeSpace(page) >= insertstate->itemsz)
    2733       23592 :             return;
    2734             : 
    2735             :         /* Might as well assume duplicates (if checkingunique) */
    2736         100 :         uniquedup = true;
    2737             :     }
    2738             : 
    2739             :     /*
    2740             :      * We're done with simple deletion.  Return early for callers that only
    2741             :      * want simple deletion to be considered.  This includes callers that
    2742             :      * explicitly ask for it and checkingunique callers that probably don't
    2743             :      * have any version churn duplicates on the page.
    2744             :      *
    2745             :      * Note: The page's BTP_HAS_GARBAGE hint flag may still be set when we
    2746             :      * return at this point (or when we go on to try either or both of our
    2747             :      * other strategies and they also fail).  We do not bother expending a
    2748             :      * separate write to clear it, however.  Caller will definitely clear it
    2749             :      * when it goes on to split the page (note also that the deduplication
    2750             :      * process will clear the flag in passing, just to keep things tidy).
    2751             :      */
    2752       43672 :     if (simpleonly || (checkingunique && !uniquedup))
    2753             :     {
    2754             :         Assert(!indexUnchanged);
    2755       15528 :         return;
    2756             :     }
    2757             : 
    2758             :     /* Assume bounds about to be invalidated (this is almost certain now) */
    2759       28144 :     insertstate->bounds_valid = false;
    2760             : 
    2761             :     /*
    2762             :      * Perform a bottom-up index deletion pass when the executor hint
    2763             :      * indicated that the incoming item is logically unchanged, or for a
    2764             :      * unique index that is known to have physical duplicates for some other
    2765             :      * reason.  (There is a large overlap between these two cases for a
    2766             :      * unique index.  It's worth having both triggering conditions in order
    2767             :      * to apply the optimization to successive INSERT and DELETE statements.)
    2768             :      *
    2769             :      * We'll go on to do a deduplication pass when a bottom-up pass fails to
    2770             :      * free an acceptable amount of space (a significant fraction of
    2771             :      * the page, or space for the new item, whichever is greater).
    2772             :      *
    2773             :      * Note: Bottom-up index deletion uses the same equality/equivalence
    2774             :      * routines as deduplication internally.  However, it does not merge
    2775             :      * together index tuples, so the same correctness considerations do not
    2776             :      * apply.  We deliberately omit an index-is-allequalimage test here.
    2777             :      */
    2778       32070 :     if ((indexUnchanged || uniquedup) &&
    2779        3926 :         _bt_bottomupdel_pass(rel, buffer, heapRel, insertstate->itemsz))
    2780         446 :         return;
    2781             : 
    2782             :     /* Perform deduplication pass (when enabled and index-is-allequalimage) */
    2783       27698 :     if (BTGetDeduplicateItems(rel) && itup_key->allequalimage)
    2784       27680 :         _bt_dedup_pass(rel, buffer, insertstate->itup, insertstate->itemsz,
    2785       27680 :                        (indexUnchanged || uniquedup));
    2786             : }
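
As a summary of the header comment's strategy ordering, the free-standing sketch below restates the decision sequence. plan_ops and its booleans are invented illustrative names mirroring this function's arguments, not PostgreSQL code; note that in the function above, finding any LP_DEAD items also forces uniquedup to true before the fallbacks are considered.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical restatement of the ordering: simple deletion is always
     * attempted first; bottom-up deletion and deduplication are fallbacks. */
    static void
    plan_ops(bool simpleonly, bool checkingunique, bool uniquedup,
             bool indexUnchanged, bool allequalimage)
    {
        puts("1. simple deletion of LP_DEAD items (always attempted)");

        if (simpleonly || (checkingunique && !uniquedup))
            return;             /* no fallback: caller may split the page */

        if (indexUnchanged || uniquedup)
            puts("2. bottom-up index deletion (may free enough on its own)");

        if (allequalimage)
            puts("3. deduplication pass (last resort before a page split)");
    }

    int
    main(void)
    {
        /* e.g. a non-unique allequalimage index with an unchanged-index hint */
        plan_ops(false, false, false, true, true);
        return 0;
    }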
    2787             : 
    2788             : /*
    2789             :  * _bt_simpledel_pass - Simple index tuple deletion pass.
    2790             :  *
    2791             :  * We delete all LP_DEAD-set index tuples on a leaf page.  The offset numbers
    2792             :  * of all such tuples are determined by caller (caller passes these to us as
    2793             :  * its 'deletable' argument).
    2794             :  *
    2795             :  * We might also delete extra index tuples that turn out to be safe to delete
    2796             :  * in passing (though they must be cheap to check in passing to begin with).
    2797             :  * There is no certainty that any extra tuples will be deleted, though.  The
    2798             :  * high-level goal of the approach we take is to get the most out of each call
    2799             :  * here (without noticeably increasing the per-call overhead compared to what
    2800             :  * we need to do just to be able to delete the page's LP_DEAD-marked index
    2801             :  * tuples).
    2802             :  *
    2803             :  * The number of extra index tuples that turn out to be deletable might
    2804             :  * greatly exceed the number of LP_DEAD-marked index tuples due to various
    2805             :  * locality related effects.  For example, it's possible that the total number
    2806             :  * locality-related effects.  For example, it's possible that the total number
    2807             :  * quite low, in which case we might end up checking if it's possible to
    2808             :  * delete _most_ index tuples on the page (without the tableam needing to
    2809             :  * access additional table blocks).  The tableam will sometimes stumble upon
    2810             :  * _many_ extra deletable index tuples in indexes where this pattern is
    2811             :  * common.
    2812             :  *
    2813             :  * See nbtree/README for further details on simple index tuple deletion.
    2814             :  */
    2815             : static void
    2816        7718 : _bt_simpledel_pass(Relation rel, Buffer buffer, Relation heapRel,
    2817             :                    OffsetNumber *deletable, int ndeletable, IndexTuple newitem,
    2818             :                    OffsetNumber minoff, OffsetNumber maxoff)
    2819             : {
    2820        7718 :     Page        page = BufferGetPage(buffer);
    2821             :     BlockNumber *deadblocks;
    2822             :     int         ndeadblocks;
    2823             :     TM_IndexDeleteOp delstate;
    2824             :     OffsetNumber offnum;
    2825             : 
    2826             :     /* Get array of table blocks pointed to by LP_DEAD-set tuples */
    2827        7718 :     deadblocks = _bt_deadblocks(page, deletable, ndeletable, newitem,
    2828             :                                 &ndeadblocks);
    2829             : 
    2830             :     /* Initialize tableam state that describes index deletion operation */
    2831        7718 :     delstate.irel = rel;
    2832        7718 :     delstate.iblknum = BufferGetBlockNumber(buffer);
    2833        7718 :     delstate.bottomup = false;
    2834        7718 :     delstate.bottomupfreespace = 0;
    2835        7718 :     delstate.ndeltids = 0;
    2836        7718 :     delstate.deltids = palloc(MaxTIDsPerBTreePage * sizeof(TM_IndexDelete));
    2837        7718 :     delstate.status = palloc(MaxTIDsPerBTreePage * sizeof(TM_IndexStatus));
    2838             : 
    2839        7718 :     for (offnum = minoff;
    2840     2187376 :          offnum <= maxoff;
    2841     2179658 :          offnum = OffsetNumberNext(offnum))
    2842             :     {
    2843     2179658 :         ItemId      itemid = PageGetItemId(page, offnum);
    2844     2179658 :         IndexTuple  itup = (IndexTuple) PageGetItem(page, itemid);
    2845     2179658 :         TM_IndexDelete *odeltid = &delstate.deltids[delstate.ndeltids];
    2846     2179658 :         TM_IndexStatus *ostatus = &delstate.status[delstate.ndeltids];
    2847             :         BlockNumber tidblock;
    2848             :         void       *match;
    2849             : 
    2850     2179658 :         if (!BTreeTupleIsPosting(itup))
    2851             :         {
    2852     2081766 :             tidblock = ItemPointerGetBlockNumber(&itup->t_tid);
    2853     2081766 :             match = bsearch(&tidblock, deadblocks, ndeadblocks,
    2854             :                             sizeof(BlockNumber), _bt_blk_cmp);
    2855             : 
    2856     2081766 :             if (!match)
    2857             :             {
    2858             :                 Assert(!ItemIdIsDead(itemid));
    2859     1322702 :                 continue;
    2860             :             }
    2861             : 
    2862             :             /*
    2863             :              * TID's table block is among those pointed to by the TIDs from
    2864             :              * LP_DEAD-bit set tuples on page -- add TID to deltids
    2865             :              */
    2866      759064 :             odeltid->tid = itup->t_tid;
    2867      759064 :             odeltid->id = delstate.ndeltids;
    2868      759064 :             ostatus->idxoffnum = offnum;
    2869      759064 :             ostatus->knowndeletable = ItemIdIsDead(itemid);
    2870      759064 :             ostatus->promising = false; /* unused */
    2871      759064 :             ostatus->freespace = 0; /* unused */
    2872             : 
    2873      759064 :             delstate.ndeltids++;
    2874             :         }
    2875             :         else
    2876             :         {
    2877       97892 :             int         nitem = BTreeTupleGetNPosting(itup);
    2878             : 
    2879      470304 :             for (int p = 0; p < nitem; p++)
    2880             :             {
    2881      372412 :                 ItemPointer tid = BTreeTupleGetPostingN(itup, p);
    2882             : 
    2883      372412 :                 tidblock = ItemPointerGetBlockNumber(tid);
    2884      372412 :                 match = bsearch(&tidblock, deadblocks, ndeadblocks,
    2885             :                                 sizeof(BlockNumber), _bt_blk_cmp);
    2886             : 
    2887      372412 :                 if (!match)
    2888             :                 {
    2889             :                     Assert(!ItemIdIsDead(itemid));
    2890      328566 :                     continue;
    2891             :                 }
    2892             : 
    2893             :                 /*
    2894             :                  * TID's table block is among those pointed to by the TIDs
    2895             :                  * from LP_DEAD-bit set tuples on page -- add TID to deltids
    2896             :                  */
    2897       43846 :                 odeltid->tid = *tid;
    2898       43846 :                 odeltid->id = delstate.ndeltids;
    2899       43846 :                 ostatus->idxoffnum = offnum;
    2900       43846 :                 ostatus->knowndeletable = ItemIdIsDead(itemid);
    2901       43846 :                 ostatus->promising = false; /* unused */
    2902       43846 :                 ostatus->freespace = 0; /* unused */
    2903             : 
    2904       43846 :                 odeltid++;
    2905       43846 :                 ostatus++;
    2906       43846 :                 delstate.ndeltids++;
    2907             :             }
    2908             :         }
    2909             :     }
    2910             : 
    2911        7718 :     pfree(deadblocks);
    2912             : 
    2913             :     Assert(delstate.ndeltids >= ndeletable);
    2914             : 
    2915             :     /* Physically delete LP_DEAD tuples (plus any delete-safe extra TIDs) */
    2916        7718 :     _bt_delitems_delete_check(rel, buffer, heapRel, &delstate);
    2917             : 
    2918        7718 :     pfree(delstate.deltids);
    2919        7718 :     pfree(delstate.status);
    2920        7718 : }
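
Each TID's membership test against the sorted deadblocks array above is a plain bsearch(). A minimal standalone version of that probe follows; the array contents are hypothetical, and blk_cmp() restates _bt_blk_cmp() without the common/int.h dependency.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    typedef uint32_t BlockNumber;

    /* same ordering as _bt_blk_cmp, written without pg_cmp_u32 */
    static int
    blk_cmp(const void *arg1, const void *arg2)
    {
        BlockNumber b1 = *(const BlockNumber *) arg1;
        BlockNumber b2 = *(const BlockNumber *) arg2;

        return (b1 > b2) - (b1 < b2);
    }

    int
    main(void)
    {
        /* hypothetical sorted, deduplicated LP_DEAD-related table blocks */
        BlockNumber deadblocks[] = {3, 17, 42, 99};
        BlockNumber tidblock = 42;      /* block of some index tuple's TID */
        size_t      ndeadblocks = sizeof(deadblocks) / sizeof(deadblocks[0]);
        void       *match;

        match = bsearch(&tidblock, deadblocks, ndeadblocks,
                        sizeof(BlockNumber), blk_cmp);
        printf("block %u %s among the candidate table blocks\n",
               (unsigned) tidblock, match ? "is" : "is not");
        return 0;
    }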
    2921             : 
    2922             : /*
    2923             :  * _bt_deadblocks() -- Get LP_DEAD related table blocks.
    2924             :  *
    2925             :  * Builds a sorted, deduplicated array of table block numbers from index
    2926             :  * tuple TIDs whose line pointers are marked LP_DEAD.  Also adds the table
    2927             :  * block from incoming newitem just in case it isn't among the LP_DEAD-related
    2928             :  * table blocks.
    2929             :  *
    2930             :  * Always counting the newitem's table block as an LP_DEAD related block makes
    2931             :  * sense because the cost is consistently low; it is practically certain that
    2932             :  * the table block will not incur a buffer miss in tableam.  On the other hand
    2933             :  * the benefit is often quite high.  There is a decent chance that there will
    2934             :  * be some deletable items from this block, since in general most garbage
    2935             :  * tuples became garbage in the recent past (in many cases the incoming row
    2936             :  * won't be the first one that core code recently added to or modified in
    2937             :  * its table block).
    2938             :  *
    2939             :  * Returns final array, and sets *nblocks to its final size for caller.
    2940             :  */
    2941             : static BlockNumber *
    2942        7718 : _bt_deadblocks(Page page, OffsetNumber *deletable, int ndeletable,
    2943             :                IndexTuple newitem, int *nblocks)
    2944             : {
    2945             :     int         spacentids,
    2946             :                 ntids;
    2947             :     BlockNumber *tidblocks;
    2948             : 
    2949             :     /*
    2950             :      * Accumulate each TID's block in an array whose initial size has space
    2951             :      * for one table block per LP_DEAD-set tuple (plus space for the newitem
    2952             :      * table block).  The array will only need to grow when there are
    2953             :      * LP_DEAD-marked posting list tuples (which is not that common).
    2954             :      */
    2955        7718 :     spacentids = ndeletable + 1;
    2956        7718 :     ntids = 0;
    2957        7718 :     tidblocks = (BlockNumber *) palloc(sizeof(BlockNumber) * spacentids);
    2958             : 
    2959             :     /*
    2960             :      * First add the table block for the incoming newitem.  This is the one
    2961             :      * case where simple deletion can visit a table block that doesn't have
    2962             :      * any known deletable items.
    2963             :      */
    2964             :     Assert(!BTreeTupleIsPosting(newitem) && !BTreeTupleIsPivot(newitem));
    2965        7718 :     tidblocks[ntids++] = ItemPointerGetBlockNumber(&newitem->t_tid);
    2966             : 
    2967      262774 :     for (int i = 0; i < ndeletable; i++)
    2968             :     {
    2969      255056 :         ItemId      itemid = PageGetItemId(page, deletable[i]);
    2970      255056 :         IndexTuple  itup = (IndexTuple) PageGetItem(page, itemid);
    2971             : 
    2972             :         Assert(ItemIdIsDead(itemid));
    2973             : 
    2974      255056 :         if (!BTreeTupleIsPosting(itup))
    2975             :         {
    2976      246612 :             if (ntids + 1 > spacentids)
    2977             :             {
    2978         194 :                 spacentids *= 2;
    2979             :                 tidblocks = (BlockNumber *)
    2980         194 :                     repalloc(tidblocks, sizeof(BlockNumber) * spacentids);
    2981             :             }
    2982             : 
    2983      246612 :             tidblocks[ntids++] = ItemPointerGetBlockNumber(&itup->t_tid);
    2984             :         }
    2985             :         else
    2986             :         {
    2987        8444 :             int         nposting = BTreeTupleGetNPosting(itup);
    2988             : 
    2989        8444 :             if (ntids + nposting > spacentids)
    2990             :             {
    2991         160 :                 spacentids = Max(spacentids * 2, ntids + nposting);
    2992             :                 tidblocks = (BlockNumber *)
    2993         160 :                     repalloc(tidblocks, sizeof(BlockNumber) * spacentids);
    2994             :             }
    2995             : 
    2996       27330 :             for (int j = 0; j < nposting; j++)
    2997             :             {
    2998       18886 :                 ItemPointer tid = BTreeTupleGetPostingN(itup, j);
    2999             : 
    3000       18886 :                 tidblocks[ntids++] = ItemPointerGetBlockNumber(tid);
    3001             :             }
    3002             :         }
    3003             :     }
    3004             : 
    3005        7718 :     qsort(tidblocks, ntids, sizeof(BlockNumber), _bt_blk_cmp);
    3006        7718 :     *nblocks = qunique(tidblocks, ntids, sizeof(BlockNumber), _bt_blk_cmp);
    3007             : 
    3008        7718 :     return tidblocks;
    3009             : }
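
The closing qsort/qunique step is the usual sort-then-collapse idiom. Below is a standalone reproduction; unique_blocks() is a local stand-in for lib/qunique.h's qunique() (which, as used above, collapses adjacent duplicates in a sorted array and returns the new element count), and the sample block numbers are made up.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    typedef uint32_t BlockNumber;

    static int
    blk_cmp(const void *arg1, const void *arg2)
    {
        BlockNumber b1 = *(const BlockNumber *) arg1;
        BlockNumber b2 = *(const BlockNumber *) arg2;

        return (b1 > b2) - (b1 < b2);
    }

    /* local stand-in for qunique(): collapse adjacent duplicates in a
     * sorted array, returning the new element count */
    static size_t
    unique_blocks(BlockNumber *a, size_t n)
    {
        size_t      w = 0;

        for (size_t r = 0; r < n; r++)
        {
            if (w == 0 || a[w - 1] != a[r])
                a[w++] = a[r];
        }
        return w;
    }

    int
    main(void)
    {
        /* hypothetical blocks gathered from LP_DEAD tuples plus newitem */
        BlockNumber tidblocks[] = {17, 3, 17, 42, 3, 3};
        size_t      ntids = sizeof(tidblocks) / sizeof(tidblocks[0]);

        qsort(tidblocks, ntids, sizeof(BlockNumber), blk_cmp);
        ntids = unique_blocks(tidblocks, ntids);

        for (size_t i = 0; i < ntids; i++)
            printf("%u ", (unsigned) tidblocks[i]);     /* prints: 3 17 42 */
        putchar('\n');
        return 0;
    }

Sorting first makes the duplicates adjacent, so deduplication is a single linear pass and the result is ready for the bsearch() probes in _bt_simpledel_pass().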
    3010             : 
    3011             : /*
    3012             :  * _bt_blk_cmp() -- qsort comparison function for _bt_simpledel_pass
    3013             :  */
    3014             : static inline int
    3015     5461526 : _bt_blk_cmp(const void *arg1, const void *arg2)
    3016             : {
    3017     5461526 :     BlockNumber b1 = *((BlockNumber *) arg1);
    3018     5461526 :     BlockNumber b2 = *((BlockNumber *) arg2);
    3019             : 
    3020     5461526 :     return pg_cmp_u32(b1, b2);
    3021             : }
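
Using pg_cmp_u32() here instead of the tempting "return b1 - b2" matters because unsigned subtraction wraps: once two block numbers are more than INT_MAX apart, the difference can come out with the wrong sign. A small demonstration follows; cmp_u32() restates what common/int.h's helper computes, as an assumption for illustration.

    #include <stdio.h>
    #include <stdint.h>

    /* restatement of what common/int.h's pg_cmp_u32 computes */
    static int
    cmp_u32(uint32_t a, uint32_t b)
    {
        return (a > b) - (a < b);
    }

    /* the tempting-but-wrong alternative */
    static int
    cmp_u32_by_subtraction(uint32_t a, uint32_t b)
    {
        return (int) (a - b);   /* unsigned wraparound breaks the sign */
    }

    int
    main(void)
    {
        uint32_t    lo = 0;
        uint32_t    hi = 0x80000001u;   /* more than INT_MAX apart */

        printf("correct:     %d\n", cmp_u32(lo, hi));   /* -1 */
        printf("subtraction: %d\n", cmp_u32_by_subtraction(lo, hi));
        /* prints 2147483647: wrong sign, which would corrupt the sort order */
        return 0;
    }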

Generated by: LCOV version 1.16