LCOV - code coverage report
Current view: top level - src/backend/access/nbtree - nbtinsert.c (source / functions)
Test: PostgreSQL 13beta1
Date: 2020-06-01 09:07:10

             Hit    Total    Coverage
Lines:       592      681      86.9 %
Functions:    12       13      92.3 %

Legend: lines are marked as hit / not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * nbtinsert.c
       4             :  *    Item insertion in Lehman and Yao btrees for Postgres.
       5             :  *
       6             :  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/access/nbtree/nbtinsert.c
      12             :  *
      13             :  *-------------------------------------------------------------------------
      14             :  */
      15             : 
      16             : #include "postgres.h"
      17             : 
      18             : #include "access/nbtree.h"
      19             : #include "access/nbtxlog.h"
      20             : #include "access/tableam.h"
      21             : #include "access/transam.h"
      22             : #include "access/xloginsert.h"
      23             : #include "miscadmin.h"
      24             : #include "storage/lmgr.h"
      25             : #include "storage/predicate.h"
      26             : #include "storage/smgr.h"
      27             : 
      28             : /* Minimum tree height for application of fastpath optimization */
      29             : #define BTREE_FASTPATH_MIN_LEVEL    2
      30             : 
      31             : 
      32             : static BTStack _bt_search_insert(Relation rel, BTInsertState insertstate);
      33             : static TransactionId _bt_check_unique(Relation rel, BTInsertState insertstate,
      34             :                                       Relation heapRel,
      35             :                                       IndexUniqueCheck checkUnique, bool *is_unique,
      36             :                                       uint32 *speculativeToken);
      37             : static OffsetNumber _bt_findinsertloc(Relation rel,
      38             :                                       BTInsertState insertstate,
      39             :                                       bool checkingunique,
      40             :                                       BTStack stack,
      41             :                                       Relation heapRel);
      42             : static void _bt_stepright(Relation rel, BTInsertState insertstate, BTStack stack);
      43             : static void _bt_insertonpg(Relation rel, BTScanInsert itup_key,
      44             :                            Buffer buf,
      45             :                            Buffer cbuf,
      46             :                            BTStack stack,
      47             :                            IndexTuple itup,
      48             :                            Size itemsz,
      49             :                            OffsetNumber newitemoff,
      50             :                            int postingoff,
      51             :                            bool split_only_page);
      52             : static Buffer _bt_split(Relation rel, BTScanInsert itup_key, Buffer buf,
      53             :                         Buffer cbuf, OffsetNumber newitemoff, Size newitemsz,
      54             :                         IndexTuple newitem, IndexTuple orignewitem,
      55             :                         IndexTuple nposting, uint16 postingoff);
      56             : static void _bt_insert_parent(Relation rel, Buffer buf, Buffer rbuf,
      57             :                               BTStack stack, bool is_root, bool is_only);
      58             : static Buffer _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf);
      59             : static inline bool _bt_pgaddtup(Page page, Size itemsize, IndexTuple itup,
      60             :                                 OffsetNumber itup_off, bool newfirstdataitem);
      61             : static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel);
      62             : 
      63             : /*
      64             :  *  _bt_doinsert() -- Handle insertion of a single index tuple in the tree.
      65             :  *
      66             :  *      This routine is called by the public interface routine, btinsert.
      67             :  *      By here, itup is filled in, including the TID.
      68             :  *
      69             :  *      If checkUnique is UNIQUE_CHECK_NO or UNIQUE_CHECK_PARTIAL, this
      70             :  *      will allow duplicates.  Otherwise (UNIQUE_CHECK_YES or
      71             :  *      UNIQUE_CHECK_EXISTING) it will throw error for a duplicate.
      72             :  *      For UNIQUE_CHECK_EXISTING we merely run the duplicate check, and
      73             :  *      don't actually insert.
      74             :  *
      75             :  *      The result value is only significant for UNIQUE_CHECK_PARTIAL:
      76             :  *      it must be true if the entry is known unique, else false.
      77             :  *      (In the current implementation we'll also return true after a
      78             :  *      successful UNIQUE_CHECK_YES or UNIQUE_CHECK_EXISTING call, but
      79             :  *      that's just a coding artifact.)
      80             :  */
      81             : bool
      82    11077394 : _bt_doinsert(Relation rel, IndexTuple itup,
      83             :              IndexUniqueCheck checkUnique, Relation heapRel)
      84             : {
      85    11077394 :     bool        is_unique = false;
      86             :     BTInsertStateData insertstate;
      87             :     BTScanInsert itup_key;
      88             :     BTStack     stack;
      89    11077394 :     bool        checkingunique = (checkUnique != UNIQUE_CHECK_NO);
      90             : 
      91             :     /* we need an insertion scan key to do our search, so build one */
      92    11077394 :     itup_key = _bt_mkscankey(rel, itup);
      93             : 
      94    11077394 :     if (checkingunique)
      95             :     {
      96     4728760 :         if (!itup_key->anynullkeys)
      97             :         {
      98             :             /* No (heapkeyspace) scantid until uniqueness established */
      99     4728664 :             itup_key->scantid = NULL;
     100             :         }
     101             :         else
     102             :         {
     103             :             /*
     104             :              * Scan key for new tuple contains NULL key values.  Bypass
     105             :              * checkingunique steps.  They are unnecessary because core code
     106             :              * considers NULL unequal to every value, including NULL.
     107             :              *
     108             :              * This optimization avoids O(N^2) behavior within the
     109             :              * _bt_findinsertloc() heapkeyspace path when a unique index has a
     110             :              * large number of "duplicates" with NULL key values.
     111             :              */
     112          96 :             checkingunique = false;
     113             :             /* Tuple is unique in the sense that core code cares about */
     114             :             Assert(checkUnique != UNIQUE_CHECK_EXISTING);
     115          96 :             is_unique = true;
     116             :         }
     117             :     }
     118             : 
     119             :     /*
     120             :      * Fill in the BTInsertState working area, to track the current page and
     121             :      * position within the page to insert on.
     122             :      *
     123             :      * Note that itemsz is passed down to lower level code that deals with
     124             :      * inserting the item.  It must be MAXALIGN()'d.  This ensures that space
     125             :      * accounting code consistently considers the alignment overhead that we
     126             :      * expect PageAddItem() will add later.  (Actually, index_form_tuple() is
     127             :      * already conservative about alignment, but we don't rely on that from
     128             :      * this distance.  Besides, preserving the "true" tuple size in index
     129             :      * tuple headers for the benefit of nbtsplitloc.c might happen someday.
     130             :      * Note that heapam does not MAXALIGN() each heap tuple's lp_len field.)
     131             :      */
     132    11077394 :     insertstate.itup = itup;
     133    11077394 :     insertstate.itemsz = MAXALIGN(IndexTupleSize(itup));
     134    11077394 :     insertstate.itup_key = itup_key;
     135    11077394 :     insertstate.bounds_valid = false;
     136    11077394 :     insertstate.buf = InvalidBuffer;
     137    11077394 :     insertstate.postingoff = 0;
     138             : 
     139    11077410 : search:
     140             : 
     141             :     /*
     142             :      * Find and lock the leaf page that the tuple should be added to by
     143             :      * searching from the root page.  insertstate.buf will hold a buffer that
     144             :      * is locked in exclusive mode afterwards.
     145             :      */
     146    11077410 :     stack = _bt_search_insert(rel, &insertstate);
     147             : 
     148             :     /*
     149             :      * checkingunique inserts are not allowed to go ahead when two tuples with
     150             :      * equal key attribute values would be visible to new MVCC snapshots once
     151             :      * the xact commits.  Check for conflicts in the locked page/buffer (if
     152             :      * needed) here.
     153             :      *
     154             :      * It might be necessary to check a page to the right in _bt_check_unique,
     155             :      * though that should be very rare.  In practice the first page the value
     156             :      * could be on (with scantid omitted) is almost always also the only page
     157             :      * that a matching tuple might be found on.  This is due to the behavior
     158             :      * of _bt_findsplitloc with duplicate tuples -- a group of duplicates can
     159             :      * only be allowed to cross a page boundary when there is no candidate
     160             :      * leaf page split point that avoids it.  Also, _bt_check_unique can use
     161             :      * the leaf page high key to determine that there will be no duplicates on
     162             :      * the right sibling without actually visiting it (it uses the high key in
     163             :      * cases where the new item happens to belong at the far right of the leaf
     164             :      * page).
     165             :      *
     166             :      * NOTE: obviously, _bt_check_unique can only detect keys that are already
     167             :      * in the index; so it cannot defend against concurrent insertions of the
     168             :      * same key.  We protect against that by means of holding a write lock on
     169             :      * the first page the value could be on, with omitted/-inf value for the
     170             :      * implicit heap TID tiebreaker attribute.  Any other would-be inserter of
     171             :      * the same key must acquire a write lock on the same page, so only one
     172             :      * would-be inserter can be making the check at one time.  Furthermore,
     173             :      * once we are past the check we hold write locks continuously until we
     174             :      * have performed our insertion, so no later inserter can fail to see our
     175             :      * insertion.  (This requires some care in _bt_findinsertloc.)
     176             :      *
     177             :      * If we must wait for another xact, we release the lock while waiting,
     178             :      * and then must perform a new search.
     179             :      *
     180             :      * For a partial uniqueness check, we don't wait for the other xact. Just
     181             :      * let the tuple in and return false for possibly non-unique, or true for
     182             :      * definitely unique.
     183             :      */
     184    11077410 :     if (checkingunique)
     185             :     {
     186             :         TransactionId xwait;
     187             :         uint32      speculativeToken;
     188             : 
     189     4728680 :         xwait = _bt_check_unique(rel, &insertstate, heapRel, checkUnique,
     190             :                                  &is_unique, &speculativeToken);
     191             : 
     192     4728340 :         if (unlikely(TransactionIdIsValid(xwait)))
     193             :         {
     194             :             /* Have to wait for the other guy ... */
     195          16 :             _bt_relbuf(rel, insertstate.buf);
     196          16 :             insertstate.buf = InvalidBuffer;
     197             : 
     198             :             /*
     199             :              * If it's a speculative insertion, wait for it to finish (ie. to
     200             :              * go ahead with the insertion, or kill the tuple).  Otherwise
     201             :              * wait for the transaction to finish as usual.
     202             :              */
     203          16 :             if (speculativeToken)
     204           0 :                 SpeculativeInsertionWait(xwait, speculativeToken);
     205             :             else
     206          16 :                 XactLockTableWait(xwait, rel, &itup->t_tid, XLTW_InsertIndex);
     207             : 
     208             :             /* start over... */
     209          16 :             if (stack)
     210           0 :                 _bt_freestack(stack);
     211          16 :             goto search;
     212             :         }
     213             : 
     214             :         /* Uniqueness is established -- restore heap tid as scantid */
     215     4728324 :         if (itup_key->heapkeyspace)
     216     4728324 :             itup_key->scantid = &itup->t_tid;
     217             :     }
     218             : 
     219    11077054 :     if (checkUnique != UNIQUE_CHECK_EXISTING)
     220             :     {
     221             :         OffsetNumber newitemoff;
     222             : 
     223             :         /*
     224             :          * The only conflict predicate locking cares about for indexes is when
     225             :          * an index tuple insert conflicts with an existing lock.  We don't
     226             :          * know the actual page we're going to insert on for sure just yet in
     227             :          * checkingunique and !heapkeyspace cases, but it's okay to use the
     228             :          * first page the value could be on (with scantid omitted) instead.
     229             :          */
     230    11077018 :         CheckForSerializableConflictIn(rel, NULL, BufferGetBlockNumber(insertstate.buf));
     231             : 
     232             :         /*
     233             :          * Do the insertion.  Note that insertstate contains cached binary
     234             :          * search bounds established within _bt_check_unique when insertion is
     235             :          * checkingunique.
     236             :          */
     237    11077012 :         newitemoff = _bt_findinsertloc(rel, &insertstate, checkingunique,
     238             :                                        stack, heapRel);
     239    11077012 :         _bt_insertonpg(rel, itup_key, insertstate.buf, InvalidBuffer, stack,
     240             :                        itup, insertstate.itemsz, newitemoff,
     241             :                        insertstate.postingoff, false);
     242             :     }
     243             :     else
     244             :     {
     245             :         /* just release the buffer */
     246          36 :         _bt_relbuf(rel, insertstate.buf);
     247             :     }
     248             : 
     249             :     /* be tidy */
     250    11077048 :     if (stack)
     251     9920350 :         _bt_freestack(stack);
     252    11077048 :     pfree(itup_key);
     253             : 
     254    11077048 :     return is_unique;
     255             : }
     256             : 
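A minimal standalone sketch of the MAXALIGN() space accounting performed on
insertstate.itemsz in _bt_doinsert() above.  TYPEALIGN/MAXALIGN follow their
definitions in PostgreSQL's c.h (with uintptr_t simplified to unsigned long),
8-byte maximum alignment is assumed, and the 13-byte tuple size is a made-up
example; this block is illustrative and not part of nbtinsert.c.

#include <stdio.h>

#define MAXIMUM_ALIGNOF 8
#define TYPEALIGN(ALIGNVAL, LEN) \
    (((unsigned long) (LEN) + ((ALIGNVAL) - 1)) & ~((unsigned long) ((ALIGNVAL) - 1)))
#define MAXALIGN(LEN) TYPEALIGN(MAXIMUM_ALIGNOF, (LEN))

int
main(void)
{
    unsigned long rawsz = 13;   /* hypothetical IndexTupleSize() result */

    /* Space accounting must anticipate the alignment padding PageAddItem() adds */
    printf("raw %lu -> MAXALIGN'd %lu\n", rawsz, MAXALIGN(rawsz));  /* 13 -> 16 */
    return 0;
}
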
     257             : /*
     258             :  *  _bt_search_insert() -- _bt_search() wrapper for inserts
     259             :  *
     260             :  * Search the tree for a particular scankey, or more precisely for the first
     261             :  * leaf page it could be on.  Try to make use of the fastpath optimization's
     262             :  * rightmost leaf page cache before actually searching the tree from the root
     263             :  * page, though.
     264             :  *
     265             :  * Return value is a stack of parent-page pointers (though see notes about
     266             :  * fastpath optimization and page splits below).  insertstate->buf is set to
     267             :  * the address of the leaf-page buffer, which is write-locked and pinned in
     268             :  * all cases (if necessary by creating a new empty root page for caller).
     269             :  *
     270             :  * The fastpath optimization avoids most of the work of searching the tree
     271             :  * repeatedly when a single backend inserts successive new tuples on the
     272             :  * rightmost leaf page of an index.  A backend cache of the rightmost leaf
     273             :  * page is maintained within _bt_insertonpg(), and used here.  The cache is
     274             :  * invalidated here when an insert of a non-pivot tuple must take place on a
     275             :  * non-rightmost leaf page.
     276             :  *
     277             :  * The optimization helps with indexes on an auto-incremented field.  It also
     278             :  * helps with indexes on datetime columns, as well as indexes with lots of
     279             :  * NULL values.  (NULLs usually get inserted in the rightmost page for single
     280             :  * column indexes, since they usually get treated as coming after everything
     281             :  * else in the key space.  Individual NULL tuples will generally be placed on
     282             :  * the rightmost leaf page due to the influence of the heap TID column.)
     283             :  *
     284             :  * Note that we avoid applying the optimization when there is insufficient
     285             :  * space on the rightmost page to fit caller's new item.  This is necessary
     286             :  * because we'll need to return a real descent stack when a page split is
     287             :  * expected (actually, caller can cope with a leaf page split that uses a NULL
     288             :  * stack, but that's very slow and so must be avoided).  Note also that the
     289             :  * fastpath optimization acquires the lock on the page conditionally as a way
     290             :  * of reducing extra contention when there are concurrent insertions into the
     291             :  * rightmost page (we give up if we'd have to wait for the lock).  We assume
     292             :  * that it isn't useful to apply the optimization when there is contention,
     293             :  * since each per-backend cache won't stay valid for long.
     294             :  */
     295             : static BTStack
     296    11077410 : _bt_search_insert(Relation rel, BTInsertState insertstate)
     297             : {
     298             :     Assert(insertstate->buf == InvalidBuffer);
     299             :     Assert(!insertstate->bounds_valid);
     300             :     Assert(insertstate->postingoff == 0);
     301             : 
     302    11077410 :     if (RelationGetTargetBlock(rel) != InvalidBlockNumber)
     303             :     {
     304             :         /* Simulate a _bt_getbuf() call with conditional locking */
     305       68256 :         insertstate->buf = ReadBuffer(rel, RelationGetTargetBlock(rel));
     306       68256 :         if (ConditionalLockBuffer(insertstate->buf))
     307             :         {
     308             :             Page        page;
     309             :             BTPageOpaque lpageop;
     310             : 
     311       67356 :             _bt_checkpage(rel, insertstate->buf);
     312       67356 :             page = BufferGetPage(insertstate->buf);
     313       67356 :             lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
     314             : 
     315             :             /*
     316             :              * Check if the page is still the rightmost leaf page and has
     317             :              * enough free space to accommodate the new tuple.  Also check
     318             :              * that the insertion scan key is strictly greater than the first
     319             :              * non-pivot tuple on the page.  (Note that we expect itup_key's
     320             :              * scantid to be unset when our caller is a checkingunique
     321             :              * inserter.)
     322             :              */
     323       67356 :             if (P_RIGHTMOST(lpageop) &&
     324       67262 :                 P_ISLEAF(lpageop) &&
     325       67262 :                 !P_IGNORE(lpageop) &&
     326       67262 :                 PageGetFreeSpace(page) > insertstate->itemsz &&
     327      133840 :                 PageGetMaxOffsetNumber(page) >= P_HIKEY &&
     328       66920 :                 _bt_compare(rel, insertstate->itup_key, page, P_HIKEY) > 0)
     329             :             {
     330             :                 /*
     331             :                  * Caller can use the fastpath optimization because cached
     332             :                  * block is still rightmost leaf page, which can fit caller's
     333             :                  * new tuple without splitting.  Keep block in local cache for
     334             :                  * next insert, and have caller use NULL stack.
     335             :                  *
     336             :                  * Note that _bt_insert_parent() has an assertion that catches
     337             :                  * leaf page splits that somehow follow from a fastpath insert
     338             :                  * (it should only be passed a NULL stack when it must deal
     339             :                  * with a concurrent root page split, and never because a NULL
     340             :                  * stack was returned here).
     341             :                  */
     342       66888 :                 return NULL;
     343             :             }
     344             : 
     345             :             /* Page unsuitable for caller, drop lock and pin */
     346         468 :             _bt_relbuf(rel, insertstate->buf);
     347             :         }
     348             :         else
     349             :         {
     350             :             /* Lock unavailable, drop pin */
     351         900 :             ReleaseBuffer(insertstate->buf);
     352             :         }
     353             : 
     354             :         /* Forget block, since cache doesn't appear to be useful */
     355        1368 :         RelationSetTargetBlock(rel, InvalidBlockNumber);
     356             :     }
     357             : 
     358             :     /* Cannot use optimization -- descend tree, return proper descent stack */
     359    11010522 :     return _bt_search(rel, insertstate->itup_key, &insertstate->buf, BT_WRITE,
     360             :                       NULL);
     361             : }
     362             : 
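The control flow of the fastpath described above, reduced to a standalone
sketch: try the cached rightmost block with a conditional lock, validate it,
and fall back to a full descent on any doubt.  Every name here (cached_block,
still_rightmost_with_room, descend_from_root, page_lock) is invented for
illustration; only the shape of the logic mirrors _bt_search_insert().

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static int cached_block = -1;   /* per-backend cache; -1 means invalid */

/* Toy stand-ins for the real page checks and the root-to-leaf descent */
static int
still_rightmost_with_room(int blk)
{
    return blk == 42;
}

static int
descend_from_root(void)
{
    pthread_mutex_lock(&page_lock); /* slow path may wait for the lock */
    return 42;
}

/* Returns a block with page_lock held, like insertstate->buf above */
static int
locate_insert_block(void)
{
    if (cached_block != -1 &&
        pthread_mutex_trylock(&page_lock) == 0) /* never wait under contention */
    {
        if (still_rightmost_with_room(cached_block))
            return cached_block;    /* fastpath hit; keep the lock */
        pthread_mutex_unlock(&page_lock);
        cached_block = -1;          /* cache no longer useful; forget it */
    }
    return descend_from_root();
}

int
main(void)
{
    cached_block = 42;
    printf("insert on block %d\n", locate_insert_block());
    pthread_mutex_unlock(&page_lock);
    return 0;
}
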
     363             : /*
     364             :  *  _bt_check_unique() -- Check for violation of unique index constraint
     365             :  *
     366             :  * Returns InvalidTransactionId if there is no conflict, else an xact ID
     367             :  * we must wait for, to see whether it commits a conflicting tuple.  If an actual
     368             :  * conflict is detected, no return --- just ereport().  If an xact ID is
     369             :  * returned, and the conflicting tuple still has a speculative insertion in
     370             :  * progress, *speculativeToken is set to non-zero, and the caller can wait for
     371             :  * the verdict on the insertion using SpeculativeInsertionWait().
     372             :  *
     373             :  * However, if checkUnique == UNIQUE_CHECK_PARTIAL, we always return
     374             :  * InvalidTransactionId because we don't want to wait.  In this case we
     375             :  * set *is_unique to false if there is a potential conflict, and the
     376             :  * core code must redo the uniqueness check later.
     377             :  *
     378             :  * As a side-effect, sets state in insertstate that can later be used by
     379             :  * _bt_findinsertloc() to reuse most of the binary search work we do
     380             :  * here.
     381             :  *
     382             :  * Do not call here when there are NULL values in scan key.  NULL should be
     383             :  * considered unequal to NULL when checking for duplicates, but we are not
     384             :  * prepared to handle that correctly.
     385             :  */
     386             : static TransactionId
     387     4728680 : _bt_check_unique(Relation rel, BTInsertState insertstate, Relation heapRel,
     388             :                  IndexUniqueCheck checkUnique, bool *is_unique,
     389             :                  uint32 *speculativeToken)
     390             : {
     391     4728680 :     IndexTuple  itup = insertstate->itup;
     392             :     IndexTuple  curitup;
     393             :     ItemId      curitemid;
     394     4728680 :     BTScanInsert itup_key = insertstate->itup_key;
     395             :     SnapshotData SnapshotDirty;
     396             :     OffsetNumber offset;
     397             :     OffsetNumber maxoff;
     398             :     Page        page;
     399             :     BTPageOpaque opaque;
     400     4728680 :     Buffer      nbuf = InvalidBuffer;
     401     4728680 :     bool        found = false;
     402     4728680 :     bool        inposting = false;
     403     4728680 :     bool        prevalldead = true;
     404     4728680 :     int         curposti = 0;
     405             : 
     406             :     /* Assume unique until we find a duplicate */
     407     4728680 :     *is_unique = true;
     408             : 
     409     4728680 :     InitDirtySnapshot(SnapshotDirty);
     410             : 
     411     4728680 :     page = BufferGetPage(insertstate->buf);
     412     4728680 :     opaque = (BTPageOpaque) PageGetSpecialPointer(page);
     413     4728680 :     maxoff = PageGetMaxOffsetNumber(page);
     414             : 
     415             :     /*
     416             :      * Find the first tuple with the same key.
     417             :      *
     418             :      * This also saves the binary search bounds in insertstate.  We use them
     419             :      * in the fastpath below, but also in the _bt_findinsertloc() call later.
     420             :      */
     421             :     Assert(!insertstate->bounds_valid);
     422     4728680 :     offset = _bt_binsrch_insert(rel, insertstate);
     423             : 
     424             :     /*
     425             :      * Scan over all equal tuples, looking for live conflicts.
     426             :      */
     427             :     Assert(!insertstate->bounds_valid || insertstate->low == offset);
     428             :     Assert(!itup_key->anynullkeys);
     429             :     Assert(itup_key->scantid == NULL);
     430             :     for (;;)
     431             :     {
     432             :         /*
     433             :          * Each iteration of the loop processes one heap TID, not one index
     434             :          * tuple.  Current offset number for page isn't usually advanced on
     435             :          * iterations that process heap TIDs from posting list tuples.
     436             :          *
     437             :          * "inposting" state is set when _inside_ a posting list --- not when
     438             :          * we're at the start (or end) of a posting list.  We advance curposti
     439             :          * at the end of the iteration when inside a posting list tuple.  In
     440             :          * general, every loop iteration either advances the page offset or
     441             :          * advances curposti --- an iteration that handles the rightmost/max
     442             :          * heap TID in a posting list finally advances the page offset (and
     443             :          * unsets "inposting").
     444             :          *
     445             :          * Make sure the offset points to an actual index tuple before trying
     446             :          * to examine it...
     447             :          */
     448    12608152 :         if (offset <= maxoff)
     449             :         {
     450             :             /*
     451             :              * Fastpath: In most cases, we can use cached search bounds to
     452             :              * limit our consideration to items that are definitely
     453             :              * duplicates.  This fastpath doesn't apply when the original page
     454             :              * is empty, or when initial offset is past the end of the
     455             :              * original page, which may indicate that we need to examine a
     456             :              * second or subsequent page.
     457             :              *
     458             :              * Note that this optimization allows us to avoid calling
     459             :              * _bt_compare() directly when there are no duplicates, as long as
     460             :              * the offset where the key will go is not at the end of the page.
     461             :              */
     462    10047918 :             if (nbuf == InvalidBuffer && offset == insertstate->stricthigh)
     463             :             {
     464             :                 Assert(insertstate->bounds_valid);
     465             :                 Assert(insertstate->low >= P_FIRSTDATAKEY(opaque));
     466             :                 Assert(insertstate->low <= insertstate->stricthigh);
     467             :                 Assert(_bt_compare(rel, itup_key, page, offset) < 0);
     468     2054612 :                 break;
     469             :             }
     470             : 
     471             :             /*
     472             :              * We can skip items that are already marked killed.
     473             :              *
     474             :              * In the presence of heavy update activity an index may contain
     475             :              * many killed items with the same key; running _bt_compare() on
     476             :              * each killed item gets expensive.  Just advance over killed
     477             :              * items as quickly as we can.  We only apply _bt_compare() when
     478             :              * we get to a non-killed item.  We could reuse the bounds to
     479             :              * avoid _bt_compare() calls for known equal tuples, but it
     480             :              * doesn't seem worth it.  Workloads with heavy update activity
     481             :              * tend to have many deduplication passes, so we'll often avoid
     482             :              * most of those comparisons, too (we call _bt_compare() when the
     483             :              * posting list tuple is initially encountered, though not when
     484             :              * processing later TIDs from the same tuple).
     485             :              */
     486     7993306 :             if (!inposting)
     487     5006598 :                 curitemid = PageGetItemId(page, offset);
     488     7993306 :             if (inposting || !ItemIdIsDead(curitemid))
     489             :             {
     490             :                 ItemPointerData htid;
     491     7630242 :                 bool        all_dead = false;
     492             : 
     493     7630242 :                 if (!inposting)
     494             :                 {
     495             :                     /* Plain tuple, or first TID in posting list tuple */
     496     4643534 :                     if (_bt_compare(rel, itup_key, page, offset) != 0)
     497       98522 :                         break;  /* we're past all the equal tuples */
     498             : 
     499             :                     /* Advance curitup */
     500     4545012 :                     curitup = (IndexTuple) PageGetItem(page, curitemid);
     501             :                     Assert(!BTreeTupleIsPivot(curitup));
     502             :                 }
     503             : 
     504             :                 /* okay, we gotta fetch the heap tuple using htid ... */
     505     7531720 :                 if (!BTreeTupleIsPosting(curitup))
     506             :                 {
     507             :                     /* ... htid is from simple non-pivot tuple */
     508             :                     Assert(!inposting);
     509     4527076 :                     htid = curitup->t_tid;
     510             :                 }
     511     3004644 :                 else if (!inposting)
     512             :                 {
     513             :                     /* ... htid is first TID in new posting list */
     514       17936 :                     inposting = true;
     515       17936 :                     prevalldead = true;
     516       17936 :                     curposti = 0;
     517       17936 :                     htid = *BTreeTupleGetPostingN(curitup, 0);
     518             :                 }
     519             :                 else
     520             :                 {
     521             :                     /* ... htid is second or subsequent TID in posting list */
     522             :                     Assert(curposti > 0);
     523     2986708 :                     htid = *BTreeTupleGetPostingN(curitup, curposti);
     524             :                 }
     525             : 
     526             :                 /*
     527             :                  * If we are doing a recheck, we expect to find the tuple we
     528             :                  * are rechecking.  It's not a duplicate, but we have to keep
     529             :                  * scanning.
     530             :                  */
     531     7531868 :                 if (checkUnique == UNIQUE_CHECK_EXISTING &&
     532         148 :                     ItemPointerCompare(&htid, &itup->t_tid) == 0)
     533             :                 {
     534          36 :                     found = true;
     535             :                 }
     536             : 
     537             :                 /*
     538             :                  * Check if there are any table tuples for this index entry
     539             :                  * satisfying SnapshotDirty.  This is necessary because for AMs
     540             :                  * with optimizations like heap's HOT, we have just a single
     541             :                  * index entry for the entire chain.
     542             :                  */
     543     7531684 :                 else if (table_index_fetch_tuple_check(heapRel, &htid,
     544             :                                                        &SnapshotDirty,
     545             :                                                        &all_dead))
     546             :                 {
     547             :                     TransactionId xwait;
     548             : 
     549             :                     /*
     550             :                      * It is a duplicate. If we are only doing a partial
     551             :                      * check, then don't bother checking if the tuple is being
     552             :                      * updated in another transaction. Just return the fact
     553             :                      * that it is a potential conflict and leave the full
     554             :                      * check till later. Don't invalidate binary search
     555             :                      * bounds.
     556             :                      */
     557         432 :                     if (checkUnique == UNIQUE_CHECK_PARTIAL)
     558             :                     {
     559          76 :                         if (nbuf != InvalidBuffer)
     560           0 :                             _bt_relbuf(rel, nbuf);
     561          76 :                         *is_unique = false;
     562          92 :                         return InvalidTransactionId;
     563             :                     }
     564             : 
     565             :                     /*
     566             :                      * If this tuple is being updated by other transaction
     567             :                      * then we have to wait for its commit/abort.
     568             :                      */
     569         712 :                     xwait = (TransactionIdIsValid(SnapshotDirty.xmin)) ?
     570         356 :                         SnapshotDirty.xmin : SnapshotDirty.xmax;
     571             : 
     572         356 :                     if (TransactionIdIsValid(xwait))
     573             :                     {
     574          16 :                         if (nbuf != InvalidBuffer)
     575           0 :                             _bt_relbuf(rel, nbuf);
     576             :                         /* Tell _bt_doinsert to wait... */
     577          16 :                         *speculativeToken = SnapshotDirty.speculativeToken;
     578             :                         /* Caller releases lock on buf immediately */
     579          16 :                         insertstate->bounds_valid = false;
     580          16 :                         return xwait;
     581             :                     }
     582             : 
     583             :                     /*
     584             :                      * Otherwise we have a definite conflict.  But before
     585             :                      * complaining, look to see if the tuple we want to insert
     586             :                      * is itself now committed dead --- if so, don't complain.
     587             :                      * This is a waste of time in normal scenarios but we must
     588             :                      * do it to support CREATE INDEX CONCURRENTLY.
     589             :                      *
     590             :                      * We must follow HOT-chains here because during
     591             :                      * concurrent index build, we insert the root TID though
     592             :                      * the actual tuple may be somewhere in the HOT-chain.
     593             :                      * While following the chain we might not stop at the
     594             :                      * exact tuple which triggered the insert, but that's OK
     595             :                      * because if we find a live tuple anywhere in this chain,
     596             :                      * we have a unique key conflict.  The other live tuple is
     597             :                      * not part of this chain because it had a different index
     598             :                      * entry.
     599             :                      */
     600         340 :                     if (table_index_fetch_tuple_check(heapRel, &itup->t_tid,
     601             :                                                       SnapshotSelf, NULL))
     602             :                     {
     603             :                         /* Normal case --- it's still live */
     604             :                     }
     605             :                     else
     606             :                     {
     607             :                         /*
     608             :                          * It's been deleted, so no error, and no need to
     609             :                          * continue searching
     610             :                          */
     611           0 :                         break;
     612             :                     }
     613             : 
     614             :                     /*
     615             :                      * Check for a conflict-in as we would if we were going to
     616             :                      * write to this page.  We aren't actually going to write,
     617             :                      * but we want a chance to report SSI conflicts that would
     618             :                      * otherwise be masked by this unique constraint
     619             :                      * violation.
     620             :                      */
     621         340 :                     CheckForSerializableConflictIn(rel, NULL, BufferGetBlockNumber(insertstate->buf));
     622             : 
     623             :                     /*
     624             :                      * This is a definite conflict.  Break the tuple down into
     625             :                      * datums and report the error.  But first, make sure we
     626             :                      * release the buffer locks we're holding ---
     627             :                      * BuildIndexValueDescription could make catalog accesses,
     628             :                      * which in the worst case might touch this same index and
     629             :                      * cause deadlocks.
     630             :                      */
     631         332 :                     if (nbuf != InvalidBuffer)
     632           0 :                         _bt_relbuf(rel, nbuf);
     633         332 :                     _bt_relbuf(rel, insertstate->buf);
     634         332 :                     insertstate->buf = InvalidBuffer;
     635         332 :                     insertstate->bounds_valid = false;
     636             : 
     637             :                     {
     638             :                         Datum       values[INDEX_MAX_KEYS];
     639             :                         bool        isnull[INDEX_MAX_KEYS];
     640             :                         char       *key_desc;
     641             : 
     642         332 :                         index_deform_tuple(itup, RelationGetDescr(rel),
     643             :                                            values, isnull);
     644             : 
     645         332 :                         key_desc = BuildIndexValueDescription(rel, values,
     646             :                                                               isnull);
     647             : 
     648         332 :                         ereport(ERROR,
     649             :                                 (errcode(ERRCODE_UNIQUE_VIOLATION),
     650             :                                  errmsg("duplicate key value violates unique constraint \"%s\"",
     651             :                                         RelationGetRelationName(rel)),
     652             :                                  key_desc ? errdetail("Key %s already exists.",
     653             :                                                       key_desc) : 0,
     654             :                                  errtableconstraint(heapRel,
     655             :                                                     RelationGetRelationName(rel))));
     656             :                     }
     657             :                 }
     658     7531252 :                 else if (all_dead && (!inposting ||
     659        2888 :                                       (prevalldead &&
     660        2888 :                                        curposti == BTreeTupleGetNPosting(curitup) - 1)))
     661             :                 {
     662             :                     /*
     663             :                      * The conflicting tuple (or all HOT chains pointed to by
     664             :                      * all posting list TIDs) is dead to everyone, so mark the
     665             :                      * index entry killed.
     666             :                      */
     667        3094 :                     ItemIdMarkDead(curitemid);
     668        3094 :                     opaque->btpo_flags |= BTP_HAS_GARBAGE;
     669             : 
     670             :                     /*
     671             :                      * Mark buffer with a dirty hint, since state is not
     672             :                      * crucial. Be sure to mark the proper buffer dirty.
     673             :                      */
     674        3094 :                     if (nbuf != InvalidBuffer)
     675           2 :                         MarkBufferDirtyHint(nbuf, true);
     676             :                     else
     677        3092 :                         MarkBufferDirtyHint(insertstate->buf, true);
     678             :                 }
     679             : 
     680             :                 /*
     681             :                  * Remember if posting list tuple has even a single HOT chain
     682             :                  * whose members are not all dead
     683             :                  */
     684     7531288 :                 if (!all_dead && inposting)
     685     3001756 :                     prevalldead = false;
     686             :             }
     687             :         }
     688             : 
     689    10454586 :         if (inposting && curposti < BTreeTupleGetNPosting(curitup) - 1)
     690             :         {
     691             :             /* Advance to next TID in same posting list */
     692     2986708 :             curposti++;
     693     2986708 :             continue;
     694             :         }
     695     7467878 :         else if (offset < maxoff)
     696             :         {
     697             :             /* Advance to next tuple */
     698     4886068 :             curposti = 0;
     699     4886068 :             inposting = false;
     700     4886068 :             offset = OffsetNumberNext(offset);
     701             :         }
     702             :         else
     703             :         {
     704             :             int         highkeycmp;
     705             : 
     706             :             /* If scankey == hikey we gotta check the next page too */
     707     2581810 :             if (P_RIGHTMOST(opaque))
     708     2435090 :                 break;
     709      146720 :             highkeycmp = _bt_compare(rel, itup_key, page, P_HIKEY);
     710             :             Assert(highkeycmp <= 0);
     711      146720 :             if (highkeycmp != 0)
     712      140024 :                 break;
     713             :             /* Advance to next non-dead page --- there must be one */
     714             :             for (;;)
     715           0 :             {
     716        6696 :                 BlockNumber nblkno = opaque->btpo_next;
     717             : 
     718        6696 :                 nbuf = _bt_relandgetbuf(rel, nbuf, nblkno, BT_READ);
     719        6696 :                 page = BufferGetPage(nbuf);
     720        6696 :                 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
     721        6696 :                 if (!P_IGNORE(opaque))
     722        6696 :                     break;
     723           0 :                 if (P_RIGHTMOST(opaque))
     724           0 :                     elog(ERROR, "fell off the end of index \"%s\"",
     725             :                          RelationGetRelationName(rel));
     726             :             }
     727             :             /* Will also advance to next tuple */
     728        6696 :             curposti = 0;
     729        6696 :             inposting = false;
     730        6696 :             maxoff = PageGetMaxOffsetNumber(page);
     731        6696 :             offset = P_FIRSTDATAKEY(opaque);
     732             :             /* Don't invalidate binary search bounds */
     733             :         }
     734             :     }
     735             : 
     736             :     /*
     737             :      * If we are doing a recheck then we should have found the tuple we are
     738             :      * checking.  Otherwise there's something very wrong --- probably, the
     739             :      * index is on a non-immutable expression.
     740             :      */
     741     4728248 :     if (checkUnique == UNIQUE_CHECK_EXISTING && !found)
     742           0 :         ereport(ERROR,
     743             :                 (errcode(ERRCODE_INTERNAL_ERROR),
     744             :                  errmsg("failed to re-find tuple within index \"%s\"",
     745             :                         RelationGetRelationName(rel)),
     746             :                  errhint("This may be because of a non-immutable index expression."),
     747             :                  errtableconstraint(heapRel,
     748             :                                     RelationGetRelationName(rel))));
     749             : 
     750     4728248 :     if (nbuf != InvalidBuffer)
     751        3840 :         _bt_relbuf(rel, nbuf);
     752             : 
     753     4728248 :     return InvalidTransactionId;
     754             : }
     755             : 
     756             : 
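A toy model of the per-heap-TID loop in _bt_check_unique() above: each
iteration consumes one TID, advancing curposti while inside a posting list
and the page offset otherwise.  The toy_tuple array is an invented stand-in
for the on-page tuples (ntids == 1 plays the role of a plain tuple), not the
real nbtree representation.

#include <stdio.h>

struct toy_tuple
{
    int ntids;                  /* 1 for a plain tuple, >1 for a posting list */
    int tids[4];                /* heap TIDs, simplified to ints */
};

int
main(void)
{
    struct toy_tuple page[] = {
        {1, {100}},             /* plain tuple: one heap TID */
        {3, {200, 201, 202}},   /* posting list tuple: three heap TIDs */
        {1, {300}},
    };
    int maxoff = 3;
    int offset = 0;             /* 0-based here, unlike OffsetNumber */
    int curposti = 0;

    while (offset < maxoff)
    {
        printf("visit heap TID %d\n", page[offset].tids[curposti]);
        if (curposti < page[offset].ntids - 1)
            curposti++;         /* next TID within the same posting list */
        else
        {
            curposti = 0;       /* done with this tuple; advance page offset */
            offset++;
        }
    }
    return 0;
}
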
     757             : /*
     758             :  *  _bt_findinsertloc() -- Finds an insert location for a tuple
     759             :  *
     760             :  *      On entry, insertstate buffer contains the page the new tuple belongs
     761             :  *      on.  It is exclusive-locked and pinned by the caller.
     762             :  *
     763             :  *      If 'checkingunique' is true, the buffer on entry is the first page
     764             :  *      that contains duplicates of the new key.  If there are duplicates on
     765             :  *      multiple pages, the correct insertion position might be some page to
     766             :  *      the right, rather than the first page.  In that case, this function
     767             :  *      moves right to the correct target page.
     768             :  *
     769             :  *      (In a !heapkeyspace index, there can be multiple pages with the same
     770             :  *      high key on which the new tuple could legitimately be placed.  In
     771             :  *      that case, the caller passes the first page containing duplicates,
     772             :  *      just like when checkingunique=true.  If that page doesn't have enough
     773             :  *      room for the new tuple, this function moves right, trying to find a
     774             :  *      legal page that does.)
     775             :  *
     776             :  *      On exit, insertstate buffer contains the chosen insertion page, and
     777             :  *      the offset within that page is returned.  If _bt_findinsertloc needed
     778             :  *      to move right, the lock and pin on the original page are released, and
     779             :  *      the new buffer is exclusively locked and pinned instead.
     780             :  *
     781             :  *      If insertstate contains cached binary search bounds, we will take
     782             :  *      advantage of them.  This avoids repeating comparisons that we made in
     783             :  *      _bt_check_unique() already.
     784             :  *
     785             :  *      If there is not enough room on the page for the new tuple, we try to
     786             :  *      make room by removing any LP_DEAD tuples.
     787             :  */
     788             : static OffsetNumber
     789    11077012 : _bt_findinsertloc(Relation rel,
     790             :                   BTInsertState insertstate,
     791             :                   bool checkingunique,
     792             :                   BTStack stack,
     793             :                   Relation heapRel)
     794             : {
     795    11077012 :     BTScanInsert itup_key = insertstate->itup_key;
     796    11077012 :     Page        page = BufferGetPage(insertstate->buf);
     797             :     BTPageOpaque lpageop;
     798             :     OffsetNumber newitemoff;
     799             : 
     800    11077012 :     lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
     801             : 
     802             :     /* Check 1/3 of a page restriction */
     803    11077012 :     if (unlikely(insertstate->itemsz > BTMaxItemSize(page)))
     804           0 :         _bt_check_third_page(rel, heapRel, itup_key->heapkeyspace, page,
     805             :                              insertstate->itup);
     806             : 
     807             :     Assert(P_ISLEAF(lpageop) && !P_INCOMPLETE_SPLIT(lpageop));
     808             :     Assert(!insertstate->bounds_valid || checkingunique);
     809             :     Assert(!itup_key->heapkeyspace || itup_key->scantid != NULL);
     810             :     Assert(itup_key->heapkeyspace || itup_key->scantid == NULL);
     811             :     Assert(!itup_key->allequalimage || itup_key->heapkeyspace);
     812             : 
     813    11077012 :     if (itup_key->heapkeyspace)
     814             :     {
     815             :         /* Keep track of whether checkingunique duplicate seen */
     816    11077012 :         bool        uniquedup = false;
     817             : 
     818             :         /*
     819             :          * If we're inserting into a unique index, we may have to walk right
     820             :          * through leaf pages to find the one leaf page that we must insert on
     821             :          * to.
     822             :          *
     823             :          * This is needed for checkingunique callers because a scantid was not
     824             :          * used when we called _bt_search().  scantid can only be set after
     825             :          * _bt_check_unique() has checked for duplicates.  The buffer
     826             :          * initially stored in insertstate->buf has the page where the first
     827             :          * duplicate key might be found, which isn't always the page that new
     828             :          * tuple belongs on.  The heap TID attribute for new tuple (scantid)
     829             :          * could force us to insert on a sibling page, though that should be
     830             :          * very rare in practice.
     831             :          */
     832    11077012 :         if (checkingunique)
     833             :         {
     834     4728282 :             if (insertstate->low < insertstate->stricthigh)
     835             :             {
     836             :                 /* Encountered a duplicate in _bt_check_unique() */
     837             :                 Assert(insertstate->bounds_valid);
     838      237802 :                 uniquedup = true;
     839             :             }
     840             : 
     841             :             for (;;)
     842             :             {
     843             :                 /*
     844             :                  * Does the new tuple belong on this page?
     845             :                  *
     846             :                  * The earlier _bt_check_unique() call may well have
     847             :                  * established a strict upper bound on the offset for the new
     848             :                  * item.  If it's not the last item of the page (i.e. if there
     849             :                  * is at least one tuple on the page that goes after the tuple
     850             :                  * we're inserting) then we know that the tuple belongs on
     851             :                  * this page.  We can skip the high key check.
     852             :                  */
     853     4734978 :                 if (insertstate->bounds_valid &&
     854     9437120 :                     insertstate->low <= insertstate->stricthigh &&
     855     4718560 :                     insertstate->stricthigh <= PageGetMaxOffsetNumber(page))
     856     2139292 :                     break;
     857             : 
     858             :                 /* Test '<=', not '!=', since scantid is set now */
     859     2749860 :                 if (P_RIGHTMOST(lpageop) ||
     860      154174 :                     _bt_compare(rel, itup_key, page, P_HIKEY) <= 0)
     861             :                     break;
     862             : 
     863        6696 :                 _bt_stepright(rel, insertstate, stack);
     864             :                 /* Update local state after stepping right */
     865        6696 :                 page = BufferGetPage(insertstate->buf);
     866        6696 :                 lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
     867             :                 /* Assume duplicates (if checkingunique) */
     868        6696 :                 uniquedup = true;
     869             :             }
     870             :         }
     871             : 
     872             :         /*
     873             :          * If the target page is full, see if we can obtain enough space by
     874             :          * erasing LP_DEAD items.  If that fails to free enough space, see if
     875             :          * we can avoid a page split by performing a deduplication pass over
     876             :          * the page.
     877             :          *
     878             :          * We only perform a deduplication pass for a checkingunique caller
     879             :          * when the incoming item is a duplicate of an existing item on the
     880             :          * leaf page.  This heuristic avoids wasting cycles -- we only expect
     881             :          * to benefit from deduplicating a unique index page when most or all
     882             :          * recently added items are duplicates.  See nbtree/README.
     883             :          */
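                      : 
                      :         /*
                      :          * Editorial aside: the fallback order implemented below, in
                      :          * outline -- (1) _bt_vacuum_one_page() when LP_DEAD items exist;
                      :          * (2) _bt_dedup_one_page() when space is still short and duplicates
                      :          * are plausible; (3) otherwise fall through, and _bt_insertonpg()
                      :          * will split the page.
                      :          */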
     884    11077012 :         if (PageGetFreeSpace(page) < insertstate->itemsz)
     885             :         {
     886       52312 :             if (P_HAS_GARBAGE(lpageop))
     887             :             {
     888        3692 :                 _bt_vacuum_one_page(rel, insertstate->buf, heapRel);
     889        3692 :                 insertstate->bounds_valid = false;
     890             : 
     891             :                 /* Might as well assume duplicates (if checkingunique) */
     892        3692 :                 uniquedup = true;
     893             :             }
     894             : 
     895       52312 :             if (itup_key->allequalimage && BTGetDeduplicateItems(rel) &&
     896        4586 :                 (!checkingunique || uniquedup) &&
     897        3006 :                 PageGetFreeSpace(page) < insertstate->itemsz)
     898             :             {
     899        2934 :                 _bt_dedup_one_page(rel, insertstate->buf, heapRel,
     900             :                                    insertstate->itup, insertstate->itemsz,
     901             :                                    checkingunique);
     902        2934 :                 insertstate->bounds_valid = false;
     903             :             }
     904             :         }
     905             :     }
     906             :     else
     907             :     {
     908             :         /*----------
     909             :          * This is a !heapkeyspace (version 2 or 3) index.  The current page
      910             :          * is the first page that we could insert the new tuple into, but there
     911             :          * may be other pages to the right that we could opt to use instead.
     912             :          *
     913             :          * If the new key is equal to one or more existing keys, we can
     914             :          * legitimately place it anywhere in the series of equal keys.  In
     915             :          * fact, if the new key is equal to the page's "high key" we can place
     916             :          * it on the next page.  If it is equal to the high key, and there's
     917             :          * not room to insert the new tuple on the current page without
      918             :          * splitting, then we can move right, hoping to find more free space
      919             :          * and avoid a split.
     920             :          *
     921             :          * Keep scanning right until we
     922             :          *      (a) find a page with enough free space,
     923             :          *      (b) reach the last page where the tuple can legally go, or
     924             :          *      (c) get tired of searching.
     925             :          * (c) is not flippant; it is important because if there are many
     926             :          * pages' worth of equal keys, it's better to split one of the early
     927             :          * pages than to scan all the way to the end of the run of equal keys
     928             :          * on every insert.  We implement "get tired" as a random choice,
     929             :          * since stopping after scanning a fixed number of pages wouldn't work
     930             :          * well (we'd never reach the right-hand side of previously split
     931             :          * pages).  The probability of moving right is set at 0.99, which may
     932             :          * seem too high to change the behavior much, but it does an excellent
     933             :          * job of preventing O(N^2) behavior with many equal keys.
     934             :          *----------
     935             :          */
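                      : 
                      :         /*
                      :          * Editorial aside: the arithmetic behind "get tired".  Each pass
                      :          * stops with probability p = 1/100 (the random() <=
                      :          * MAX_RANDOM_VALUE / 100 test below), so the number of pages
                      :          * visited in a long run of equal keys is geometrically distributed
                      :          * with mean 1/p = 100: bounded in expectation, yet with no fixed
                      :          * cutoff that would strand us short of the right-hand side of
                      :          * previously split pages.
                      :          */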
     936           0 :         while (PageGetFreeSpace(page) < insertstate->itemsz)
     937             :         {
     938             :             /*
     939             :              * Before considering moving right, see if we can obtain enough
      940             :              * space by erasing LP_DEAD items.
     941             :              */
     942           0 :             if (P_HAS_GARBAGE(lpageop))
     943             :             {
     944           0 :                 _bt_vacuum_one_page(rel, insertstate->buf, heapRel);
     945           0 :                 insertstate->bounds_valid = false;
     946             : 
     947           0 :                 if (PageGetFreeSpace(page) >= insertstate->itemsz)
     948           0 :                     break;      /* OK, now we have enough space */
     949             :             }
     950             : 
     951             :             /*
     952             :              * Nope, so check conditions (b) and (c) enumerated above
     953             :              *
     954             :              * The earlier _bt_check_unique() call may well have established a
     955             :              * strict upper bound on the offset for the new item.  If it's not
     956             :              * the last item of the page (i.e. if there is at least one tuple
      957             :              * on the page that's greater than the tuple we're inserting)
     958             :              * then we know that the tuple belongs on this page.  We can skip
     959             :              * the high key check.
     960             :              */
     961           0 :             if (insertstate->bounds_valid &&
     962           0 :                 insertstate->low <= insertstate->stricthigh &&
     963           0 :                 insertstate->stricthigh <= PageGetMaxOffsetNumber(page))
     964           0 :                 break;
     965             : 
     966           0 :             if (P_RIGHTMOST(lpageop) ||
     967           0 :                 _bt_compare(rel, itup_key, page, P_HIKEY) != 0 ||
     968           0 :                 random() <= (MAX_RANDOM_VALUE / 100))
     969             :                 break;
     970             : 
     971           0 :             _bt_stepright(rel, insertstate, stack);
     972             :             /* Update local state after stepping right */
     973           0 :             page = BufferGetPage(insertstate->buf);
     974           0 :             lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
     975             :         }
     976             :     }
     977             : 
     978             :     /*
     979             :      * We should now be on the correct page.  Find the offset within the page
     980             :      * for the new tuple. (Possibly reusing earlier search bounds.)
     981             :      */
     982             :     Assert(P_RIGHTMOST(lpageop) ||
     983             :            _bt_compare(rel, itup_key, page, P_HIKEY) <= 0);
     984             : 
     985    11077012 :     newitemoff = _bt_binsrch_insert(rel, insertstate);
     986             : 
     987    11077012 :     if (insertstate->postingoff == -1)
     988             :     {
     989             :         /*
     990             :          * There is an overlapping posting list tuple with its LP_DEAD bit
     991             :          * set.  We don't want to unnecessarily unset its LP_DEAD bit while
     992             :          * performing a posting list split, so delete all LP_DEAD items early.
     993             :          * This is the only case where LP_DEAD deletes happen even though
     994             :          * there is space for newitem on the page.
     995             :          */
     996           0 :         _bt_vacuum_one_page(rel, insertstate->buf, heapRel);
     997             : 
     998             :         /*
     999             :          * Do new binary search.  New insert location cannot overlap with any
    1000             :          * posting list now.
    1001             :          */
    1002           0 :         insertstate->bounds_valid = false;
    1003           0 :         insertstate->postingoff = 0;
    1004           0 :         newitemoff = _bt_binsrch_insert(rel, insertstate);
    1005             :         Assert(insertstate->postingoff == 0);
    1006             :     }
    1007             : 
    1008    11077012 :     return newitemoff;
    1009             : }
    1010             : 
    1011             : /*
     1012             :  * Step right to the next non-dead page during insertion.
    1013             :  *
    1014             :  * This is a bit more complicated than moving right in a search.  We must
    1015             :  * write-lock the target page before releasing write lock on current page;
    1016             :  * else someone else's _bt_check_unique scan could fail to see our insertion.
    1017             :  * Write locks on intermediate dead pages won't do because we don't know when
    1018             :  * they will get de-linked from the tree.
    1019             :  *
    1020             :  * This is more aggressive than it needs to be for non-unique !heapkeyspace
    1021             :  * indexes.
    1022             :  */
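                      : 
                      : /*
                      :  * Editorial aside: the lock-coupling order this requires, in minimal form
                      :  * (hypothetical buffers lbuf/rbuf; _bt_relandgetbuf() below does the same
                      :  * while also recycling the pin):
                      :  *
                      :  *     rbuf = _bt_getbuf(rel, rblkno, BT_WRITE);   // lock right page first
                      :  *     _bt_relbuf(rel, lbuf);                      // only then release left
                      :  */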
    1023             : static void
    1024        6696 : _bt_stepright(Relation rel, BTInsertState insertstate, BTStack stack)
    1025             : {
    1026             :     Page        page;
    1027             :     BTPageOpaque lpageop;
    1028             :     Buffer      rbuf;
    1029             :     BlockNumber rblkno;
    1030             : 
    1031        6696 :     page = BufferGetPage(insertstate->buf);
    1032        6696 :     lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
    1033             : 
    1034        6696 :     rbuf = InvalidBuffer;
    1035        6696 :     rblkno = lpageop->btpo_next;
    1036             :     for (;;)
    1037             :     {
    1038        6696 :         rbuf = _bt_relandgetbuf(rel, rbuf, rblkno, BT_WRITE);
    1039        6696 :         page = BufferGetPage(rbuf);
    1040        6696 :         lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
    1041             : 
    1042             :         /*
    1043             :          * If this page was incompletely split, finish the split now.  We do
    1044             :          * this while holding a lock on the left sibling, which is not good
    1045             :          * because finishing the split could be a fairly lengthy operation.
    1046             :          * But this should happen very seldom.
    1047             :          */
    1048        6696 :         if (P_INCOMPLETE_SPLIT(lpageop))
    1049             :         {
    1050           0 :             _bt_finish_split(rel, rbuf, stack);
    1051           0 :             rbuf = InvalidBuffer;
    1052           0 :             continue;
    1053             :         }
    1054             : 
    1055        6696 :         if (!P_IGNORE(lpageop))
    1056        6696 :             break;
    1057           0 :         if (P_RIGHTMOST(lpageop))
    1058           0 :             elog(ERROR, "fell off the end of index \"%s\"",
    1059             :                  RelationGetRelationName(rel));
    1060             : 
    1061           0 :         rblkno = lpageop->btpo_next;
    1062             :     }
    1063             :     /* rbuf locked; unlock buf, update state for caller */
    1064        6696 :     _bt_relbuf(rel, insertstate->buf);
    1065        6696 :     insertstate->buf = rbuf;
    1066        6696 :     insertstate->bounds_valid = false;
    1067        6696 : }
    1068             : 
    1069             : /*----------
    1070             :  *  _bt_insertonpg() -- Insert a tuple on a particular page in the index.
    1071             :  *
    1072             :  *      This recursive procedure does the following things:
    1073             :  *
    1074             :  *          +  if postingoff != 0, splits existing posting list tuple
    1075             :  *             (since it overlaps with new 'itup' tuple).
    1076             :  *          +  if necessary, splits the target page, using 'itup_key' for
    1077             :  *             suffix truncation on leaf pages (caller passes NULL for
    1078             :  *             non-leaf pages).
    1079             :  *          +  inserts the new tuple (might be split from posting list).
    1080             :  *          +  if the page was split, pops the parent stack, and finds the
    1081             :  *             right place to insert the new child pointer (by walking
    1082             :  *             right using information stored in the parent stack).
    1083             :  *          +  invokes itself with the appropriate tuple for the right
    1084             :  *             child page on the parent.
    1085             :  *          +  updates the metapage if a true root or fast root is split.
    1086             :  *
    1087             :  *      On entry, we must have the correct buffer in which to do the
    1088             :  *      insertion, and the buffer must be pinned and write-locked.  On return,
    1089             :  *      we will have dropped both the pin and the lock on the buffer.
    1090             :  *
    1091             :  *      This routine only performs retail tuple insertions.  'itup' should
    1092             :  *      always be either a non-highkey leaf item, or a downlink (new high
    1093             :  *      key items are created indirectly, when a page is split).  When
    1094             :  *      inserting to a non-leaf page, 'cbuf' is the left-sibling of the page
    1095             :  *      we're inserting the downlink for.  This function will clear the
    1096             :  *      INCOMPLETE_SPLIT flag on it, and release the buffer.
    1097             :  *----------
    1098             :  */
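                      : 
                      : /*
                      :  * Editorial aside: the shape of the recursion, as a sketch (real function
                      :  * names, simplified arguments).  _bt_insert_parent() re-enters
                      :  * _bt_insertonpg() one level up, so a leaf split can cascade to the root:
                      :  *
                      :  *     _bt_insertonpg(leaf item)
                      :  *         -> _bt_split()                            // page full
                      :  *         -> _bt_insert_parent()
                      :  *             -> _bt_insertonpg(downlink)           // may split again ...
                      :  */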
    1099             : static void
    1100    11120950 : _bt_insertonpg(Relation rel,
    1101             :                BTScanInsert itup_key,
    1102             :                Buffer buf,
    1103             :                Buffer cbuf,
    1104             :                BTStack stack,
    1105             :                IndexTuple itup,
    1106             :                Size itemsz,
    1107             :                OffsetNumber newitemoff,
    1108             :                int postingoff,
    1109             :                bool split_only_page)
    1110             : {
    1111             :     Page        page;
    1112             :     BTPageOpaque lpageop;
    1113    11120950 :     IndexTuple  oposting = NULL;
    1114    11120950 :     IndexTuple  origitup = NULL;
    1115    11120950 :     IndexTuple  nposting = NULL;
    1116             : 
    1117    11120950 :     page = BufferGetPage(buf);
    1118    11120950 :     lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
    1119             : 
    1120             :     /* child buffer must be given iff inserting on an internal page */
    1121             :     Assert(P_ISLEAF(lpageop) == !BufferIsValid(cbuf));
    1122             :     /* tuple must have appropriate number of attributes */
    1123             :     Assert(!P_ISLEAF(lpageop) ||
    1124             :            BTreeTupleGetNAtts(itup, rel) ==
    1125             :            IndexRelationGetNumberOfAttributes(rel));
    1126             :     Assert(P_ISLEAF(lpageop) ||
    1127             :            BTreeTupleGetNAtts(itup, rel) <=
    1128             :            IndexRelationGetNumberOfKeyAttributes(rel));
    1129             :     Assert(!BTreeTupleIsPosting(itup));
    1130             :     Assert(MAXALIGN(IndexTupleSize(itup)) == itemsz);
    1131             : 
    1132             :     /*
    1133             :      * Every internal page should have exactly one negative infinity item at
    1134             :      * all times.  Only _bt_split() and _bt_newroot() should add items that
    1135             :      * become negative infinity items through truncation, since they're the
    1136             :      * only routines that allocate new internal pages.
    1137             :      */
    1138             :     Assert(P_ISLEAF(lpageop) || newitemoff > P_FIRSTDATAKEY(lpageop));
    1139             : 
    1140             :     /* The caller should've finished any incomplete splits already. */
    1141    11120950 :     if (P_INCOMPLETE_SPLIT(lpageop))
    1142           0 :         elog(ERROR, "cannot insert to incompletely split page %u",
    1143             :              BufferGetBlockNumber(buf));
    1144             : 
    1145             :     /*
    1146             :      * Do we need to split an existing posting list item?
    1147             :      */
    1148    11120950 :     if (postingoff != 0)
    1149             :     {
    1150        4022 :         ItemId      itemid = PageGetItemId(page, newitemoff);
    1151             : 
    1152             :         /*
    1153             :          * The new tuple is a duplicate with a heap TID that falls inside the
    1154             :          * range of an existing posting list tuple on a leaf page.  Prepare to
    1155             :          * split an existing posting list.  Overwriting the posting list with
    1156             :          * its post-split version is treated as an extra step in either the
    1157             :          * insert or page split critical section.
    1158             :          */
    1159             :         Assert(P_ISLEAF(lpageop) && !ItemIdIsDead(itemid));
    1160             :         Assert(itup_key->heapkeyspace && itup_key->allequalimage);
    1161        4022 :         oposting = (IndexTuple) PageGetItem(page, itemid);
    1162             : 
    1163             :         /* use a mutable copy of itup as our itup from here on */
    1164        4022 :         origitup = itup;
    1165        4022 :         itup = CopyIndexTuple(origitup);
    1166        4022 :         nposting = _bt_swap_posting(itup, oposting, postingoff);
    1167             :         /* itup now contains rightmost/max TID from oposting */
    1168             : 
    1169             :         /* Alter offset so that newitem goes after posting list */
    1170        4022 :         newitemoff = OffsetNumberNext(newitemoff);
    1171             :     }
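                      : 
                      :     /*
                      :      * Editorial aside, a worked example of the swap above (TIDs are
                      :      * illustrative): if oposting holds heap TIDs {(0,1), (0,3), (0,5)}
                      :      * and the incoming tuple's TID is (0,4), _bt_swap_posting() leaves
                      :      * nposting = {(0,1), (0,3), (0,4)} and rewrites itup to carry (0,5),
                      :      * oposting's old maximum -- the tuple actually inserted at
                      :      * newitemoff is the displaced rightmost TID.
                      :      */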
    1172             : 
    1173             :     /*
    1174             :      * Do we need to split the page to fit the item on it?
    1175             :      *
    1176             :      * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result,
    1177             :      * so this comparison is correct even though we appear to be accounting
    1178             :      * only for the item and not for its line pointer.
    1179             :      */
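                      :     /*
                      :      * Editorial aside, making that concrete: PageGetFreeSpace() returns
                      :      * (pd_upper - pd_lower) - sizeof(ItemIdData), i.e. 4 bytes less than
                      :      * the raw hole.  With 120 raw bytes between pd_lower and pd_upper it
                      :      * reports 116, so a 112-byte (MAXALIGNed) tuple fits while a
                      :      * 120-byte one forces a split.
                      :      */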
    1180    11120950 :     if (PageGetFreeSpace(page) < itemsz)
    1181             :     {
    1182       46348 :         bool        is_root = P_ISROOT(lpageop);
    1183       46348 :         bool        is_only = P_LEFTMOST(lpageop) && P_RIGHTMOST(lpageop);
    1184             :         Buffer      rbuf;
    1185             : 
    1186             :         Assert(!split_only_page);
    1187             : 
    1188             :         /* split the buffer into left and right halves */
    1189       46348 :         rbuf = _bt_split(rel, itup_key, buf, cbuf, newitemoff, itemsz, itup,
    1190             :                          origitup, nposting, postingoff);
    1191       46348 :         PredicateLockPageSplit(rel,
    1192             :                                BufferGetBlockNumber(buf),
    1193             :                                BufferGetBlockNumber(rbuf));
    1194             : 
    1195             :         /*----------
    1196             :          * By here,
    1197             :          *
    1198             :          *      +  our target page has been split;
    1199             :          *      +  the original tuple has been inserted;
    1200             :          *      +  we have write locks on both the old (left half)
    1201             :          *         and new (right half) buffers, after the split; and
    1202             :          *      +  we know the key we want to insert into the parent
    1203             :          *         (it's the "high key" on the left child page).
    1204             :          *
    1205             :          * We're ready to do the parent insertion.  We need to hold onto the
    1206             :          * locks for the child pages until we locate the parent, but we can
    1207             :          * at least release the lock on the right child before doing the
    1208             :          * actual insertion.  The lock on the left child will be released
    1209             :          * last of all by parent insertion, where it is the 'cbuf' of parent
    1210             :          * page.
    1211             :          *----------
    1212             :          */
    1213       46348 :         _bt_insert_parent(rel, buf, rbuf, stack, is_root, is_only);
    1214             :     }
    1215             :     else
    1216             :     {
    1217    11074602 :         bool        isleaf = P_ISLEAF(lpageop);
    1218    11074602 :         bool        isrightmost = P_RIGHTMOST(lpageop);
    1219    11074602 :         Buffer      metabuf = InvalidBuffer;
    1220    11074602 :         Page        metapg = NULL;
    1221    11074602 :         BTMetaPageData *metad = NULL;
    1222             :         BlockNumber blockcache;
    1223             : 
    1224             :         /*
    1225             :          * If we are doing this insert because we split a page that was the
    1226             :          * only one on its tree level, but was not the root, it may have been
    1227             :          * the "fast root".  We need to ensure that the fast root link points
    1228             :          * at or above the current page.  We can safely acquire a lock on the
    1229             :          * metapage here --- see comments for _bt_newroot().
    1230             :          */
    1231    11074602 :         if (split_only_page)
    1232             :         {
    1233             :             Assert(!isleaf);
    1234             :             Assert(BufferIsValid(cbuf));
    1235             : 
    1236           6 :             metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
    1237           6 :             metapg = BufferGetPage(metabuf);
    1238           6 :             metad = BTPageGetMeta(metapg);
    1239             : 
    1240           6 :             if (metad->btm_fastlevel >= lpageop->btpo.level)
    1241             :             {
    1242             :                 /* no update wanted */
    1243           0 :                 _bt_relbuf(rel, metabuf);
    1244           0 :                 metabuf = InvalidBuffer;
    1245             :             }
    1246             :         }
    1247             : 
    1248             :         /* Do the update.  No ereport(ERROR) until changes are logged */
    1249    11074602 :         START_CRIT_SECTION();
    1250             : 
    1251    11074602 :         if (postingoff != 0)
    1252        4010 :             memcpy(oposting, nposting, MAXALIGN(IndexTupleSize(nposting)));
    1253             : 
    1254    11074602 :         if (PageAddItem(page, (Item) itup, itemsz, newitemoff, false,
    1255             :                         false) == InvalidOffsetNumber)
    1256           0 :             elog(PANIC, "failed to add new item to block %u in index \"%s\"",
    1257             :                  BufferGetBlockNumber(buf), RelationGetRelationName(rel));
    1258             : 
    1259    11074602 :         MarkBufferDirty(buf);
    1260             : 
    1261    11074602 :         if (BufferIsValid(metabuf))
    1262             :         {
    1263             :             /* upgrade meta-page if needed */
    1264           6 :             if (metad->btm_version < BTREE_NOVAC_VERSION)
    1265           0 :                 _bt_upgrademetapage(metapg);
    1266           6 :             metad->btm_fastroot = BufferGetBlockNumber(buf);
    1267           6 :             metad->btm_fastlevel = lpageop->btpo.level;
    1268           6 :             MarkBufferDirty(metabuf);
    1269             :         }
    1270             : 
    1271             :         /*
    1272             :          * Clear INCOMPLETE_SPLIT flag on child if inserting the new item
    1273             :          * finishes a split
    1274             :          */
    1275    11074602 :         if (!isleaf)
    1276             :         {
    1277       43838 :             Page        cpage = BufferGetPage(cbuf);
    1278       43838 :             BTPageOpaque cpageop = (BTPageOpaque) PageGetSpecialPointer(cpage);
    1279             : 
    1280             :             Assert(P_INCOMPLETE_SPLIT(cpageop));
    1281       43838 :             cpageop->btpo_flags &= ~BTP_INCOMPLETE_SPLIT;
    1282       43838 :             MarkBufferDirty(cbuf);
    1283             :         }
    1284             : 
    1285             :         /* XLOG stuff */
    1286    11074602 :         if (RelationNeedsWAL(rel))
    1287             :         {
    1288             :             xl_btree_insert xlrec;
    1289             :             xl_btree_metadata xlmeta;
    1290             :             uint8       xlinfo;
    1291             :             XLogRecPtr  recptr;
    1292             :             uint16      upostingoff;
    1293             : 
    1294    10666350 :             xlrec.offnum = newitemoff;
    1295             : 
    1296    10666350 :             XLogBeginInsert();
    1297    10666350 :             XLogRegisterData((char *) &xlrec, SizeOfBtreeInsert);
    1298             : 
    1299    10666350 :             if (isleaf && postingoff == 0)
    1300             :             {
    1301             :                 /* Simple leaf insert */
    1302    10619584 :                 xlinfo = XLOG_BTREE_INSERT_LEAF;
    1303             :             }
    1304       46766 :             else if (postingoff != 0)
    1305             :             {
    1306             :                 /*
    1307             :                  * Leaf insert with posting list split.  Must include
    1308             :                  * postingoff field before newitem/orignewitem.
    1309             :                  */
    1310             :                 Assert(isleaf);
    1311        4010 :                 xlinfo = XLOG_BTREE_INSERT_POST;
    1312             :             }
    1313             :             else
    1314             :             {
    1315             :                 /* Internal page insert, which finishes a split on cbuf */
    1316       42756 :                 xlinfo = XLOG_BTREE_INSERT_UPPER;
    1317       42756 :                 XLogRegisterBuffer(1, cbuf, REGBUF_STANDARD);
    1318             : 
    1319       42756 :                 if (BufferIsValid(metabuf))
    1320             :                 {
    1321             :                     /* Actually, it's an internal page insert + meta update */
    1322           6 :                     xlinfo = XLOG_BTREE_INSERT_META;
    1323             : 
    1324             :                     Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
    1325           6 :                     xlmeta.version = metad->btm_version;
    1326           6 :                     xlmeta.root = metad->btm_root;
    1327           6 :                     xlmeta.level = metad->btm_level;
    1328           6 :                     xlmeta.fastroot = metad->btm_fastroot;
    1329           6 :                     xlmeta.fastlevel = metad->btm_fastlevel;
    1330           6 :                     xlmeta.oldest_btpo_xact = metad->btm_oldest_btpo_xact;
    1331           6 :                     xlmeta.last_cleanup_num_heap_tuples =
    1332           6 :                         metad->btm_last_cleanup_num_heap_tuples;
    1333           6 :                     xlmeta.allequalimage = metad->btm_allequalimage;
    1334             : 
    1335           6 :                     XLogRegisterBuffer(2, metabuf,
    1336             :                                        REGBUF_WILL_INIT | REGBUF_STANDARD);
    1337           6 :                     XLogRegisterBufData(2, (char *) &xlmeta,
    1338             :                                         sizeof(xl_btree_metadata));
    1339             :                 }
    1340             :             }
    1341             : 
    1342    10666350 :             XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
    1343    10666350 :             if (postingoff == 0)
    1344             :             {
    1345             :                 /* Just log itup from caller */
    1346    10662340 :                 XLogRegisterBufData(0, (char *) itup, IndexTupleSize(itup));
    1347             :             }
    1348             :             else
    1349             :             {
    1350             :                 /*
    1351             :                  * Insert with posting list split (XLOG_BTREE_INSERT_POST
    1352             :                  * record) case.
    1353             :                  *
    1354             :                  * Log postingoff.  Also log origitup, not itup.  REDO routine
    1355             :                  * must reconstruct final itup (as well as nposting) using
    1356             :                  * _bt_swap_posting().
    1357             :                  */
    1358        4010 :                 upostingoff = postingoff;
    1359             : 
    1360        4010 :                 XLogRegisterBufData(0, (char *) &upostingoff, sizeof(uint16));
    1361        4010 :                 XLogRegisterBufData(0, (char *) origitup,
    1362        4010 :                                     IndexTupleSize(origitup));
    1363             :             }
    1364             : 
    1365    10666350 :             recptr = XLogInsert(RM_BTREE_ID, xlinfo);
    1366             : 
    1367    10666350 :             if (BufferIsValid(metabuf))
    1368           6 :                 PageSetLSN(metapg, recptr);
    1369    10666350 :             if (!isleaf)
    1370       42756 :                 PageSetLSN(BufferGetPage(cbuf), recptr);
    1371             : 
    1372    10666350 :             PageSetLSN(page, recptr);
    1373             :         }
    1374             : 
    1375    11074602 :         END_CRIT_SECTION();
    1376             : 
    1377             :         /* Release subsidiary buffers */
    1378    11074602 :         if (BufferIsValid(metabuf))
    1379           6 :             _bt_relbuf(rel, metabuf);
    1380    11074602 :         if (!isleaf)
    1381       43838 :             _bt_relbuf(rel, cbuf);
    1382             : 
    1383             :         /*
     1384             :          * Cache the block number if this is the rightmost leaf page.  The
     1385             :          * cache may be used by a future inserter within _bt_search_insert().
    1386             :          */
    1387    11074602 :         blockcache = InvalidBlockNumber;
    1388    11074602 :         if (isrightmost && isleaf && !P_ISROOT(lpageop))
    1389     5621626 :             blockcache = BufferGetBlockNumber(buf);
    1390             : 
    1391             :         /* Release buffer for insertion target block */
    1392    11074602 :         _bt_relbuf(rel, buf);
    1393             : 
    1394             :         /*
    1395             :          * If we decided to cache the insertion target block before releasing
    1396             :          * its buffer lock, then cache it now.  Check the height of the tree
    1397             :          * first, though.  We don't go for the optimization with small
    1398             :          * indexes.  Defer final check to this point to ensure that we don't
    1399             :          * call _bt_getrootheight while holding a buffer lock.
    1400             :          */
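                      :         /*
                      :          * Editorial aside: RelationSetTargetBlock() stashes the block
                      :          * number in the relcache entry, where a later _bt_search_insert()
                      :          * can try it first and skip the root-to-leaf descent for rightmost
                      :          * inserts.  (A sketch of the fastpath's consumer; see
                      :          * _bt_search_insert() for the authoritative details.)
                      :          */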
    1401    16696228 :         if (BlockNumberIsValid(blockcache) &&
    1402     5621626 :             _bt_getrootheight(rel) >= BTREE_FASTPATH_MIN_LEVEL)
    1403       68296 :             RelationSetTargetBlock(rel, blockcache);
    1404             :     }
    1405             : 
    1406             :     /* be tidy */
    1407    11120950 :     if (postingoff != 0)
    1408             :     {
    1409             :         /* itup is actually a modified copy of caller's original */
    1410        4022 :         pfree(nposting);
    1411        4022 :         pfree(itup);
    1412             :     }
    1413    11120950 : }
    1414             : 
    1415             : /*
    1416             :  *  _bt_split() -- split a page in the btree.
    1417             :  *
    1418             :  *      On entry, buf is the page to split, and is pinned and write-locked.
    1419             :  *      newitemoff etc. tell us about the new item that must be inserted
    1420             :  *      along with the data from the original page.
    1421             :  *
    1422             :  *      itup_key is used for suffix truncation on leaf pages (internal
    1423             :  *      page callers pass NULL).  When splitting a non-leaf page, 'cbuf'
    1424             :  *      is the left-sibling of the page we're inserting the downlink for.
    1425             :  *      This function will clear the INCOMPLETE_SPLIT flag on it, and
    1426             :  *      release the buffer.
    1427             :  *
    1428             :  *      orignewitem, nposting, and postingoff are needed when an insert of
    1429             :  *      orignewitem results in both a posting list split and a page split.
    1430             :  *      These extra posting list split details are used here in the same
    1431             :  *      way as they are used in the more common case where a posting list
    1432             :  *      split does not coincide with a page split.  We need to deal with
    1433             :  *      posting list splits directly in order to ensure that everything
    1434             :  *      that follows from the insert of orignewitem is handled as a single
    1435             :  *      atomic operation (though caller's insert of a new pivot/downlink
    1436             :  *      into parent page will still be a separate operation).  See
    1437             :  *      nbtree/README for details on the design of posting list splits.
    1438             :  *
    1439             :  *      Returns the new right sibling of buf, pinned and write-locked.
    1440             :  *      The pin and lock on buf are maintained.
    1441             :  */
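                      : 
                      : /*
                      :  * Editorial aside: the phases below, in outline (a sketch -- nothing
                      :  * lasting happens before the critical section, which falls outside this
                      :  * excerpt):
                      :  *
                      :  *     firstrightoff = _bt_findsplitloc(...);    // choose split point
                      :  *     leftpage = PageGetTempPage(origpage);     // build left copy + high key
                      :  *     rbuf = _bt_getbuf(rel, P_NEW, BT_WRITE);  // only now allocate right
                      :  *     ... distribute data items between leftpage and rightpage ...
                      :  */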
    1442             : static Buffer
    1443       46348 : _bt_split(Relation rel, BTScanInsert itup_key, Buffer buf, Buffer cbuf,
    1444             :           OffsetNumber newitemoff, Size newitemsz, IndexTuple newitem,
    1445             :           IndexTuple orignewitem, IndexTuple nposting, uint16 postingoff)
    1446             : {
    1447             :     Buffer      rbuf;
    1448             :     Page        origpage;
    1449             :     Page        leftpage,
    1450             :                 rightpage;
    1451             :     BlockNumber origpagenumber,
    1452             :                 rightpagenumber;
    1453             :     BTPageOpaque ropaque,
    1454             :                 lopaque,
    1455             :                 oopaque;
    1456       46348 :     Buffer      sbuf = InvalidBuffer;
    1457       46348 :     Page        spage = NULL;
    1458       46348 :     BTPageOpaque sopaque = NULL;
    1459             :     Size        itemsz;
    1460             :     ItemId      itemid;
    1461             :     IndexTuple  firstright,
    1462             :                 lefthighkey;
    1463             :     OffsetNumber firstrightoff;
    1464             :     OffsetNumber afterleftoff,
    1465             :                 afterrightoff,
    1466             :                 minusinfoff;
    1467             :     OffsetNumber origpagepostingoff;
    1468             :     OffsetNumber maxoff;
    1469             :     OffsetNumber i;
    1470             :     bool        newitemonleft,
    1471             :                 isleaf,
    1472             :                 isrightmost;
    1473             : 
    1474             :     /*
    1475             :      * origpage is the original page to be split.  leftpage is a temporary
    1476             :      * buffer that receives the left-sibling data, which will be copied back
    1477             :      * into origpage on success.  rightpage is the new page that will receive
    1478             :      * the right-sibling data.
    1479             :      *
    1480             :      * leftpage is allocated after choosing a split point.  rightpage's new
     1481             :      * buffer isn't acquired until after leftpage is initialized and has its
     1482             :      * new high key, the last point where splitting the page may fail (barring
     1483             :      * corruption).  Failing before acquiring the new buffer won't have lasting
    1484             :      * consequences, since origpage won't have been modified and leftpage is
    1485             :      * only workspace.
    1486             :      */
    1487       46348 :     origpage = BufferGetPage(buf);
    1488       46348 :     oopaque = (BTPageOpaque) PageGetSpecialPointer(origpage);
    1489       46348 :     isleaf = P_ISLEAF(oopaque);
    1490       46348 :     isrightmost = P_RIGHTMOST(oopaque);
    1491       46348 :     maxoff = PageGetMaxOffsetNumber(origpage);
    1492       46348 :     origpagenumber = BufferGetBlockNumber(buf);
    1493             : 
    1494             :     /*
    1495             :      * Choose a point to split origpage at.
    1496             :      *
    1497             :      * A split point can be thought of as a point _between_ two existing data
    1498             :      * items on origpage (the lastleft and firstright tuples), provided you
    1499             :      * pretend that the new item that didn't fit is already on origpage.
    1500             :      *
    1501             :      * Since origpage does not actually contain newitem, the representation of
    1502             :      * split points needs to work with two boundary cases: splits where
    1503             :      * newitem is lastleft, and splits where newitem is firstright.
    1504             :      * newitemonleft resolves the ambiguity that would otherwise exist when
    1505             :      * newitemoff == firstrightoff.  In all other cases it's clear which side
    1506             :      * of the split every tuple goes on from context.  newitemonleft is
    1507             :      * usually (but not always) redundant information.
    1508             :      *
    1509             :      * firstrightoff is supposed to be an origpage offset number, but it's
    1510             :      * possible that its value will be maxoff+1, which is "past the end" of
    1511             :      * origpage.  This happens in the rare case where newitem goes after all
    1512             :      * existing items (i.e. newitemoff is maxoff+1) and we end up splitting
    1513             :      * origpage at the point that leaves newitem alone on new right page.  Any
    1514             :      * "!newitemonleft && newitemoff == firstrightoff" split point makes
    1515             :      * newitem the firstright tuple, though, so this case isn't a special
    1516             :      * case.
    1517             :      */
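                      : 
                      :     /*
                      :      * Editorial aside, the boundary case worked through: with maxoff = 10
                      :      * and newitemoff = 11 (newitem sorts after everything), a split point
                      :      * of firstrightoff = 11 with !newitemonleft leaves all ten existing
                      :      * items on the left and newitem alone as the right page's first data
                      :      * item, even though offset 11 names no existing origpage tuple.
                      :      */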
    1518       46348 :     firstrightoff = _bt_findsplitloc(rel, origpage, newitemoff, newitemsz,
    1519             :                                      newitem, &newitemonleft);
    1520             : 
    1521             :     /* Allocate temp buffer for leftpage */
    1522       46348 :     leftpage = PageGetTempPage(origpage);
    1523       46348 :     _bt_pageinit(leftpage, BufferGetPageSize(buf));
    1524       46348 :     lopaque = (BTPageOpaque) PageGetSpecialPointer(leftpage);
    1525             : 
    1526             :     /*
    1527             :      * leftpage won't be the root when we're done.  Also, clear the SPLIT_END
    1528             :      * and HAS_GARBAGE flags.
    1529             :      */
    1530       46348 :     lopaque->btpo_flags = oopaque->btpo_flags;
    1531       46348 :     lopaque->btpo_flags &= ~(BTP_ROOT | BTP_SPLIT_END | BTP_HAS_GARBAGE);
    1532             :     /* set flag in leftpage indicating that rightpage has no downlink yet */
    1533       46348 :     lopaque->btpo_flags |= BTP_INCOMPLETE_SPLIT;
    1534       46348 :     lopaque->btpo_prev = oopaque->btpo_prev;
    1535             :     /* handle btpo_next after rightpage buffer acquired */
    1536       46348 :     lopaque->btpo.level = oopaque->btpo.level;
    1537             :     /* handle btpo_cycleid after rightpage buffer acquired */
    1538             : 
    1539             :     /*
    1540             :      * Copy the original page's LSN into leftpage, which will become the
    1541             :      * updated version of the page.  We need this because XLogInsert will
    1542             :      * examine the LSN and possibly dump it in a page image.
    1543             :      */
    1544       46348 :     PageSetLSN(leftpage, PageGetLSN(origpage));
    1545             : 
    1546             :     /*
    1547             :      * Determine page offset number of existing overlapped-with-orignewitem
    1548             :      * posting list when it is necessary to perform a posting list split in
    1549             :      * passing.  Note that newitem was already changed by caller (newitem no
    1550             :      * longer has the orignewitem TID).
    1551             :      *
    1552             :      * This page offset number (origpagepostingoff) will be used to pretend
    1553             :      * that the posting split has already taken place, even though the
    1554             :      * required modifications to origpage won't occur until we reach the
    1555             :      * critical section.  The lastleft and firstright tuples of our page split
    1556             :      * point should, in effect, come from an imaginary version of origpage
    1557             :      * that has the nposting tuple instead of the original posting list tuple.
    1558             :      *
    1559             :      * Note: _bt_findsplitloc() should have compensated for coinciding posting
    1560             :      * list splits in just the same way, at least in theory.  It doesn't
    1561             :      * bother with that, though.  In practice it won't affect its choice of
    1562             :      * split point.
    1563             :      */
    1564       46348 :     origpagepostingoff = InvalidOffsetNumber;
    1565       46348 :     if (postingoff != 0)
    1566             :     {
    1567             :         Assert(isleaf);
    1568             :         Assert(ItemPointerCompare(&orignewitem->t_tid,
    1569             :                                   &newitem->t_tid) < 0);
    1570             :         Assert(BTreeTupleIsPosting(nposting));
    1571          12 :         origpagepostingoff = OffsetNumberPrev(newitemoff);
    1572             :     }
    1573             : 
    1574             :     /*
    1575             :      * The high key for the new left page is a possibly-truncated copy of
    1576             :      * firstright on the leaf level (it's "firstright itself" on internal
    1577             :      * pages; see !isleaf comments below).  This may seem to be contrary to
    1578             :      * Lehman & Yao's approach of using a copy of lastleft as the new high key
    1579             :      * when splitting on the leaf level.  It isn't, though.
    1580             :      *
    1581             :      * Suffix truncation will leave the left page's high key fully equal to
    1582             :      * lastleft when lastleft and firstright are equal prior to heap TID (that
    1583             :      * is, the tiebreaker TID value comes from lastleft).  It isn't actually
    1584             :      * necessary for a new leaf high key to be a copy of lastleft for the L&Y
    1585             :      * "subtree" invariant to hold.  It's sufficient to make sure that the new
    1586             :      * leaf high key is strictly less than firstright, and greater than or
    1587             :      * equal to (not necessarily equal to) lastleft.  In other words, when
    1588             :      * suffix truncation isn't possible during a leaf page split, we take
    1589             :      * L&Y's exact approach to generating a new high key for the left page.
    1590             :      * (Actually, that is slightly inaccurate.  We don't just use a copy of
    1591             :      * lastleft.  A tuple with all the keys from firstright but the max heap
    1592             :      * TID from lastleft is used, to avoid introducing a special case.)
    1593             :      */
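                      : 
                      :     /*
                      :      * Editorial aside, a worked example on a hypothetical two-column
                      :      * index: with lastleft = (5, 9) and firstright = (6, 2),
                      :      * _bt_truncate() can return a pivot holding just (6).  Truncated
                      :      * attributes compare as minus infinity, so (6, -inf) is strictly
                      :      * less than firstright yet greater than lastleft, which satisfies
                      :      * the invariant described above.
                      :      */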
    1594       46348 :     if (!newitemonleft && newitemoff == firstrightoff)
    1595             :     {
    1596             :         /* incoming tuple becomes firstright */
    1597          20 :         itemsz = newitemsz;
    1598          20 :         firstright = newitem;
    1599             :     }
    1600             :     else
    1601             :     {
    1602             :         /* existing item at firstrightoff becomes firstright */
    1603       46328 :         itemid = PageGetItemId(origpage, firstrightoff);
    1604       46328 :         itemsz = ItemIdGetLength(itemid);
    1605       46328 :         firstright = (IndexTuple) PageGetItem(origpage, itemid);
    1606       46328 :         if (firstrightoff == origpagepostingoff)
    1607           0 :             firstright = nposting;
    1608             :     }
    1609             : 
    1610       46348 :     if (isleaf)
    1611             :     {
    1612             :         IndexTuple  lastleft;
    1613             : 
    1614             :         /* Attempt suffix truncation for leaf page splits */
    1615       46248 :         if (newitemonleft && newitemoff == firstrightoff)
    1616             :         {
    1617             :             /* incoming tuple becomes lastleft */
    1618        5836 :             lastleft = newitem;
    1619             :         }
    1620             :         else
    1621             :         {
    1622             :             OffsetNumber lastleftoff;
    1623             : 
    1624             :             /* existing item before firstrightoff becomes lastleft */
    1625       40412 :             lastleftoff = OffsetNumberPrev(firstrightoff);
    1626             :             Assert(lastleftoff >= P_FIRSTDATAKEY(oopaque));
    1627       40412 :             itemid = PageGetItemId(origpage, lastleftoff);
    1628       40412 :             lastleft = (IndexTuple) PageGetItem(origpage, itemid);
    1629       40412 :             if (lastleftoff == origpagepostingoff)
    1630           0 :                 lastleft = nposting;
    1631             :         }
    1632             : 
    1633       46248 :         lefthighkey = _bt_truncate(rel, lastleft, firstright, itup_key);
    1634       46248 :         itemsz = IndexTupleSize(lefthighkey);
    1635             :     }
    1636             :     else
    1637             :     {
    1638             :         /*
    1639             :          * Don't perform suffix truncation on a copy of firstright to make
    1640             :          * left page high key for internal page splits.  Must use firstright
    1641             :          * as new high key directly.
    1642             :          *
    1643             :          * Each distinct separator key value originates as a leaf level high
    1644             :          * key; all other separator keys/pivot tuples are copied from one
    1645             :          * level down.  A separator key in a grandparent page must be
    1646             :          * identical to high key in rightmost parent page of the subtree to
    1647             :          * its left, which must itself be identical to high key in rightmost
    1648             :          * child page of that same subtree (this even applies to separator
    1649             :          * from grandparent's high key).  There must always be an unbroken
    1650             :          * "seam" of identical separator keys that guide index scans at every
    1651             :          * level, starting from the grandparent.  That's why suffix truncation
    1652             :          * is unsafe here.
    1653             :          *
    1654             :          * Internal page splits will truncate firstright into a "negative
    1655             :          * infinity" data item when it gets inserted on the new right page
    1656             :          * below, though.  This happens during the call to _bt_pgaddtup() for
     1657             :          * the new first data item for the right page.  Do not confuse this
    1658             :          * mechanism with suffix truncation.  It is just a convenient way of
    1659             :          * implementing page splits that split the internal page "inside"
    1660             :          * firstright.  The lefthighkey separator key cannot appear a second
    1661             :          * time in the right page (only firstright's downlink goes in right
    1662             :          * page).
    1663             :          */
    1664         100 :         lefthighkey = firstright;
    1665             :     }
    1666             : 
    1667             :     /*
    1668             :      * Add new high key to leftpage
    1669             :      */
    1670       46348 :     afterleftoff = P_HIKEY;
    1671             : 
    1672             :     Assert(BTreeTupleGetNAtts(lefthighkey, rel) > 0);
    1673             :     Assert(BTreeTupleGetNAtts(lefthighkey, rel) <=
    1674             :            IndexRelationGetNumberOfKeyAttributes(rel));
    1675             :     Assert(itemsz == MAXALIGN(IndexTupleSize(lefthighkey)));
    1676       46348 :     if (PageAddItem(leftpage, (Item) lefthighkey, itemsz, afterleftoff, false,
    1677             :                     false) == InvalidOffsetNumber)
    1678           0 :         elog(ERROR, "failed to add high key to the left sibling"
    1679             :              " while splitting block %u of index \"%s\"",
    1680             :              origpagenumber, RelationGetRelationName(rel));
    1681       46348 :     afterleftoff = OffsetNumberNext(afterleftoff);
    1682             : 
    1683             :     /*
    1684             :      * Acquire a new right page to split into, now that left page has a new
    1685             :      * high key.  From here on, it's not okay to throw an error without
    1686             :      * zeroing rightpage first.  This coding rule ensures that we won't
    1687             :      * confuse future VACUUM operations, which might otherwise try to re-find
    1688             :      * a downlink to a leftover junk page as the page undergoes deletion.
    1689             :      *
    1690             :      * It would be reasonable to start the critical section just after the new
    1691             :      * rightpage buffer is acquired instead; that would allow us to avoid
    1692             :      * leftover junk pages without bothering to zero rightpage.  We do it this
    1693             :      * way because it avoids an unnecessary PANIC when either origpage or its
     1694             :      * existing sibling page is corrupt.
    1695             :      */
    1696       46348 :     rbuf = _bt_getbuf(rel, P_NEW, BT_WRITE);
    1697       46348 :     rightpage = BufferGetPage(rbuf);
    1698       46348 :     rightpagenumber = BufferGetBlockNumber(rbuf);
    1699             :     /* rightpage was initialized by _bt_getbuf */
    1700       46348 :     ropaque = (BTPageOpaque) PageGetSpecialPointer(rightpage);
    1701             : 
    1702             :     /*
    1703             :      * Finish off remaining leftpage special area fields.  They cannot be set
    1704             :      * before both origpage (leftpage) and rightpage buffers are acquired and
    1705             :      * locked.
    1706             :      *
    1707             :      * btpo_cycleid is only used with leaf pages, though we set it here in all
    1708             :      * cases just to be consistent.
    1709             :      */
    1710       46348 :     lopaque->btpo_next = rightpagenumber;
    1711       46348 :     lopaque->btpo_cycleid = _bt_vacuum_cycleid(rel);
    1712             : 
    1713             :     /*
    1714             :      * rightpage won't be the root when we're done.  Also, clear the SPLIT_END
    1715             :      * and HAS_GARBAGE flags.
    1716             :      */
    1717       46348 :     ropaque->btpo_flags = oopaque->btpo_flags;
    1718       46348 :     ropaque->btpo_flags &= ~(BTP_ROOT | BTP_SPLIT_END | BTP_HAS_GARBAGE);
    1719       46348 :     ropaque->btpo_prev = origpagenumber;
    1720       46348 :     ropaque->btpo_next = oopaque->btpo_next;
    1721       46348 :     ropaque->btpo.level = oopaque->btpo.level;
    1722       46348 :     ropaque->btpo_cycleid = lopaque->btpo_cycleid;
    1723             : 
    1724             :     /*
    1725             :      * Add new high key to rightpage where necessary.
    1726             :      *
    1727             :      * If the page we're splitting is not the rightmost page at its level in
    1728             :      * the tree, then the first entry on the page is the high key from
    1729             :      * origpage.
    1730             :      */
    1731       46348 :     afterrightoff = P_HIKEY;
    1732             : 
    1733       46348 :     if (!isrightmost)
    1734             :     {
    1735             :         IndexTuple  righthighkey;
    1736             : 
    1737       25432 :         itemid = PageGetItemId(origpage, P_HIKEY);
    1738       25432 :         itemsz = ItemIdGetLength(itemid);
    1739       25432 :         righthighkey = (IndexTuple) PageGetItem(origpage, itemid);
    1740             :         Assert(BTreeTupleGetNAtts(righthighkey, rel) > 0);
    1741             :         Assert(BTreeTupleGetNAtts(righthighkey, rel) <=
    1742             :                IndexRelationGetNumberOfKeyAttributes(rel));
    1743       25432 :         if (PageAddItem(rightpage, (Item) righthighkey, itemsz, afterrightoff,
    1744             :                         false, false) == InvalidOffsetNumber)
    1745             :         {
    1746           0 :             memset(rightpage, 0, BufferGetPageSize(rbuf));
    1747           0 :             elog(ERROR, "failed to add high key to the right sibling"
    1748             :                  " while splitting block %u of index \"%s\"",
    1749             :                  origpagenumber, RelationGetRelationName(rel));
    1750             :         }
    1751       25432 :         afterrightoff = OffsetNumberNext(afterrightoff);
    1752             :     }
    1753             : 
    1754             :     /*
    1755             :      * Internal page splits truncate the first data item on the right page
    1756             :      * -- it becomes the "minus infinity" item for the page.  Set this up here.
    1757             :      */
    1758       46348 :     minusinfoff = InvalidOffsetNumber;
    1759       46348 :     if (!isleaf)
    1760         100 :         minusinfoff = afterrightoff;
    1761             : 
    1762             :     /*
    1763             :      * Now transfer all the data items (non-pivot tuples in isleaf case, or
    1764             :      * additional pivot tuples in !isleaf case) to the appropriate page.
    1765             :      *
    1766             :      * Note: we *must* insert at least the right page's items in item-number
    1767             :      * order, for the benefit of _bt_restore_page().
    1768             :      */
    1769    13940140 :     for (i = P_FIRSTDATAKEY(oopaque); i <= maxoff; i = OffsetNumberNext(i))
    1770             :     {
    1771             :         IndexTuple  dataitem;
    1772             : 
    1773    13893792 :         itemid = PageGetItemId(origpage, i);
    1774    13893792 :         itemsz = ItemIdGetLength(itemid);
    1775    13893792 :         dataitem = (IndexTuple) PageGetItem(origpage, itemid);
    1776             : 
    1777             :         /* replace original item with nposting due to posting split? */
    1778    13893792 :         if (i == origpagepostingoff)
    1779             :         {
    1780             :             Assert(BTreeTupleIsPosting(dataitem));
    1781             :             Assert(itemsz == MAXALIGN(IndexTupleSize(nposting)));
    1782          12 :             dataitem = nposting;
    1783             :         }
    1784             : 
    1785             :         /* does new item belong before this one? */
    1786    13893780 :         else if (i == newitemoff)
    1787             :         {
    1788       26844 :             if (newitemonleft)
    1789             :             {
    1790             :                 Assert(newitemoff <= firstrightoff);
    1791       10760 :                 if (!_bt_pgaddtup(leftpage, newitemsz, newitem, afterleftoff,
    1792             :                                   false))
    1793             :                 {
    1794           0 :                     memset(rightpage, 0, BufferGetPageSize(rbuf));
    1795           0 :                     elog(ERROR, "failed to add new item to the left sibling"
    1796             :                          " while splitting block %u of index \"%s\"",
    1797             :                          origpagenumber, RelationGetRelationName(rel));
    1798             :                 }
    1799       10760 :                 afterleftoff = OffsetNumberNext(afterleftoff);
    1800             :             }
    1801             :             else
    1802             :             {
    1803             :                 Assert(newitemoff >= firstrightoff);
    1804       16084 :                 if (!_bt_pgaddtup(rightpage, newitemsz, newitem, afterrightoff,
    1805             :                                   afterrightoff == minusinfoff))
    1806             :                 {
    1807           0 :                     memset(rightpage, 0, BufferGetPageSize(rbuf));
    1808           0 :                     elog(ERROR, "failed to add new item to the right sibling"
    1809             :                          " while splitting block %u of index \"%s\"",
    1810             :                          origpagenumber, RelationGetRelationName(rel));
    1811             :                 }
    1812       16084 :                 afterrightoff = OffsetNumberNext(afterrightoff);
    1813             :             }
    1814             :         }
    1815             : 
    1816             :         /* decide which page to put it on */
    1817    13893792 :         if (i < firstrightoff)
    1818             :         {
    1819    10388654 :             if (!_bt_pgaddtup(leftpage, itemsz, dataitem, afterleftoff, false))
    1820             :             {
    1821           0 :                 memset(rightpage, 0, BufferGetPageSize(rbuf));
    1822           0 :                 elog(ERROR, "failed to add old item to the left sibling"
    1823             :                      " while splitting block %u of index \"%s\"",
    1824             :                      origpagenumber, RelationGetRelationName(rel));
    1825             :             }
    1826    10388654 :             afterleftoff = OffsetNumberNext(afterleftoff);
    1827             :         }
    1828             :         else
    1829             :         {
    1830     3505138 :             if (!_bt_pgaddtup(rightpage, itemsz, dataitem, afterrightoff,
    1831             :                               afterrightoff == minusinfoff))
    1832             :             {
    1833           0 :                 memset(rightpage, 0, BufferGetPageSize(rbuf));
    1834           0 :                 elog(ERROR, "failed to add old item to the right sibling"
    1835             :                      " while splitting block %u of index \"%s\"",
    1836             :                      origpagenumber, RelationGetRelationName(rel));
    1837             :             }
    1838     3505138 :             afterrightoff = OffsetNumberNext(afterrightoff);
    1839             :         }
    1840             :     }
    1841             : 
    1842             :     /* Handle case where newitem goes at the end of rightpage */
    1843       46348 :     if (i <= newitemoff)
    1844             :     {
    1845             :         /*
    1846             :          * Can't have newitemonleft here; that would imply we were told to put
    1847             :          * *everything* on the left page, which cannot fit (if it could, we'd
    1848             :          * not be splitting the page).
    1849             :          */
    1850             :         Assert(!newitemonleft && newitemoff == maxoff + 1);
    1851       19504 :         if (!_bt_pgaddtup(rightpage, newitemsz, newitem, afterrightoff,
    1852             :                           afterrightoff == minusinfoff))
    1853             :         {
    1854           0 :             memset(rightpage, 0, BufferGetPageSize(rbuf));
    1855           0 :             elog(ERROR, "failed to add new item to the right sibling"
    1856             :                  " while splitting block %u of index \"%s\"",
    1857             :                  origpagenumber, RelationGetRelationName(rel));
    1858             :         }
    1859       19504 :         afterrightoff = OffsetNumberNext(afterrightoff);
    1860             :     }
    1861             : 
    1862             :     /*
    1863             :      * We have to grab the right sibling (if any) and fix the prev pointer
    1864             :      * there. We are guaranteed that this is deadlock-free since no other
    1865             :      * writer will be holding a lock on that page and trying to move left, and
    1866             :      * all readers release locks on a page before trying to fetch its
    1867             :      * neighbors.
    1868             :      */
    1869       46348 :     if (!isrightmost)
    1870             :     {
    1871       25432 :         sbuf = _bt_getbuf(rel, oopaque->btpo_next, BT_WRITE);
    1872       25432 :         spage = BufferGetPage(sbuf);
    1873       25432 :         sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
    1874       25432 :         if (sopaque->btpo_prev != origpagenumber)
    1875             :         {
    1876           0 :             memset(rightpage, 0, BufferGetPageSize(rbuf));
    1877           0 :             ereport(ERROR,
    1878             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    1879             :                      errmsg_internal("right sibling's left-link doesn't match: "
    1880             :                                      "block %u links to %u instead of expected %u in index \"%s\"",
    1881             :                                      oopaque->btpo_next, sopaque->btpo_prev, origpagenumber,
    1882             :                                      RelationGetRelationName(rel))));
    1883             :         }
    1884             : 
    1885             :         /*
    1886             :          * Check to see if we can set the SPLIT_END flag in the right-hand
    1887             :          * split page; this can save some I/O for vacuum since it need not
    1888             :          * proceed to the right sibling.  We can set the flag if the right
    1889             :          * sibling has a different cycleid: that means it could not be part of
    1890             :          * a group of pages that were all split off from the same ancestor
    1891             :          * page.  If you're confused, imagine that page A splits to A B and
    1892             :          * then again, yielding A C B, while vacuum is in progress.  Tuples
    1893             :          * originally in A could now be in either B or C, hence vacuum must
    1894             :          * examine both pages.  But if D, our right sibling, has a different
    1895             :          * cycleid then it could not contain any tuples that were in A when
    1896             :          * the vacuum started.
    1897             :          */
    1898       25432 :         if (sopaque->btpo_cycleid != ropaque->btpo_cycleid)
    1899           0 :             ropaque->btpo_flags |= BTP_SPLIT_END;
    1900             :     }
    1901             : 
    1902             :     /*
    1903             :      * Right sibling is locked, new siblings are prepared, but original page
    1904             :      * is not updated yet.
    1905             :      *
    1906             :      * NO EREPORT(ERROR) till right sibling is updated.  We can get away with
    1907             :      * not starting the critical section till here because we haven't been
    1908             :      * scribbling on the original page yet; see comments above.
    1909             :      */
    1910       46348 :     START_CRIT_SECTION();
    1911             : 
    1912             :     /*
    1913             :      * By here, the original data page has been split into two new halves, and
    1914             :      * these are correct.  The algorithm requires that the left page never
    1915             :      * move during a split, so we copy the new left page back on top of the
    1916             :      * original.  We need to do this before writing the WAL record, so that
    1917             :      * XLogInsert can WAL log an image of the page if necessary.
    1918             :      */
    1919       46348 :     PageRestoreTempPage(leftpage, origpage);
    1920             :     /* leftpage, lopaque must not be used below here */
    1921             : 
    1922       46348 :     MarkBufferDirty(buf);
    1923       46348 :     MarkBufferDirty(rbuf);
    1924             : 
    1925       46348 :     if (!isrightmost)
    1926             :     {
    1927       25432 :         sopaque->btpo_prev = rightpagenumber;
    1928       25432 :         MarkBufferDirty(sbuf);
    1929             :     }
    1930             : 
    1931             :     /*
    1932             :      * Clear INCOMPLETE_SPLIT flag on child if inserting the new item finishes
    1933             :      * a split
    1934             :      */
    1935       46348 :     if (!isleaf)
    1936             :     {
    1937         100 :         Page        cpage = BufferGetPage(cbuf);
    1938         100 :         BTPageOpaque cpageop = (BTPageOpaque) PageGetSpecialPointer(cpage);
    1939             : 
    1940         100 :         cpageop->btpo_flags &= ~BTP_INCOMPLETE_SPLIT;
    1941         100 :         MarkBufferDirty(cbuf);
    1942             :     }
    1943             : 
    1944             :     /* XLOG stuff */
    1945       46348 :     if (RelationNeedsWAL(rel))
    1946             :     {
    1947             :         xl_btree_split xlrec;
    1948             :         uint8       xlinfo;
    1949             :         XLogRecPtr  recptr;
    1950             : 
    1951       45252 :         xlrec.level = ropaque->btpo.level;
    1952             :         /* See comments below on newitem, orignewitem, and posting lists */
    1953       45252 :         xlrec.firstrightoff = firstrightoff;
    1954       45252 :         xlrec.newitemoff = newitemoff;
    1955       45252 :         xlrec.postingoff = 0;
    1956       45252 :         if (postingoff != 0 && origpagepostingoff < firstrightoff)
    1957          12 :             xlrec.postingoff = postingoff;
    1958             : 
    1959       45252 :         XLogBeginInsert();
    1960       45252 :         XLogRegisterData((char *) &xlrec, SizeOfBtreeSplit);
    1961             : 
    1962       45252 :         XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
    1963       45252 :         XLogRegisterBuffer(1, rbuf, REGBUF_WILL_INIT);
    1964             :         /* Log original right sibling, since we've changed its prev-pointer */
    1965       45252 :         if (!isrightmost)
    1966       25424 :             XLogRegisterBuffer(2, sbuf, REGBUF_STANDARD);
    1967       45252 :         if (!isleaf)
    1968         100 :             XLogRegisterBuffer(3, cbuf, REGBUF_STANDARD);
    1969             : 
    1970             :         /*
    1971             :          * Log the new item, if it was inserted on the left page. (If it was
    1972             :          * put on the right page, we don't need to explicitly WAL log it
    1973             :          * because it's included with all the other items on the right page.)
    1974             :          * Show the new item as belonging to the left page buffer, so that it
    1975             :          * is not stored if XLogInsert decides it needs a full-page image of
    1976             :          * the left page.  We always store newitemoff in the record, though.
    1977             :          *
    1978             :          * The details are sometimes slightly different for page splits that
    1979             :          * coincide with a posting list split.  If both the replacement
    1980             :          * posting list and newitem go on the right page, then we don't need
    1981             :          * to log anything extra, just like the simple !newitemonleft
    1982             :          * no-posting-split case (postingoff is set to zero in the WAL record,
    1983             :          * so recovery doesn't need to process a posting list split at all).
    1984             :          * Otherwise, we set postingoff and log orignewitem instead of
    1985             :          * newitem, despite having actually inserted newitem.  REDO routine
    1986             :          * must reconstruct nposting and newitem using _bt_swap_posting().
    1987             :          *
    1988             :          * Note: It's possible that our page split point is the point that
    1989             :          * makes the posting list lastleft and newitem firstright.  This is
    1990             :          * the only case where we log orignewitem/newitem despite newitem
    1991             :          * going on the right page.  If XLogInsert decides that it can omit
    1992             :          * orignewitem due to logging a full-page image of the left page,
    1993             :          * everything still works out, since recovery only needs
    1994             :          * orignewitem when replaying changes to the left page (just like
    1995             :          * the regular newitem-logged case).
    1996             :          */
    1997       45252 :         if (newitemonleft && xlrec.postingoff == 0)
    1998       10744 :             XLogRegisterBufData(0, (char *) newitem, newitemsz);
    1999       34508 :         else if (xlrec.postingoff != 0)
    2000             :         {
    2001             :             Assert(isleaf);
    2002             :             Assert(newitemonleft || firstrightoff == newitemoff);
    2003             :             Assert(newitemsz == IndexTupleSize(orignewitem));
    2004          12 :             XLogRegisterBufData(0, (char *) orignewitem, newitemsz);
    2005             :         }
    2006             : 
    2007             :         /* Log the left page's new high key */
    2008       45252 :         if (!isleaf)
    2009             :         {
    2010             :             /* lefthighkey isn't local copy, get current pointer */
    2011         100 :             itemid = PageGetItemId(origpage, P_HIKEY);
    2012         100 :             lefthighkey = (IndexTuple) PageGetItem(origpage, itemid);
    2013             :         }
    2014       45252 :         XLogRegisterBufData(0, (char *) lefthighkey,
    2015       45252 :                             MAXALIGN(IndexTupleSize(lefthighkey)));
    2016             : 
    2017             :         /*
    2018             :          * Log the contents of the right page in the format understood by
    2019             :          * _bt_restore_page().  The whole right page will be recreated.
    2020             :          *
    2021             :          * Direct access to the page is not good style, but it is faster; we
    2022             :          * should add a new function to the page API.  Note we only store the tuples
    2023             :          * themselves, knowing that they were inserted in item-number order
    2024             :          * and so the line pointers can be reconstructed.  See comments for
    2025             :          * _bt_restore_page().
    2026             :          */
    2027       90504 :         XLogRegisterBufData(1,
    2028       45252 :                             (char *) rightpage + ((PageHeader) rightpage)->pd_upper,
    2029       45252 :                             ((PageHeader) rightpage)->pd_special - ((PageHeader) rightpage)->pd_upper);
    2030             : 
    2031       45252 :         xlinfo = newitemonleft ? XLOG_BTREE_SPLIT_L : XLOG_BTREE_SPLIT_R;
    2032       45252 :         recptr = XLogInsert(RM_BTREE_ID, xlinfo);
    2033             : 
    2034       45252 :         PageSetLSN(origpage, recptr);
    2035       45252 :         PageSetLSN(rightpage, recptr);
    2036       45252 :         if (!isrightmost)
    2037       25424 :             PageSetLSN(spage, recptr);
    2038       45252 :         if (!isleaf)
    2039         100 :             PageSetLSN(BufferGetPage(cbuf), recptr);
    2040             :     }
    2041             : 
    2042       46348 :     END_CRIT_SECTION();
    2043             : 
    2044             :     /* release the old right sibling */
    2045       46348 :     if (!isrightmost)
    2046       25432 :         _bt_relbuf(rel, sbuf);
    2047             : 
    2048             :     /* release the child */
    2049       46348 :     if (!isleaf)
    2050         100 :         _bt_relbuf(rel, cbuf);
    2051             : 
    2052             :     /* be tidy */
    2053       46348 :     if (isleaf)
    2054       46248 :         pfree(lefthighkey);
    2055             : 
    2056             :     /* split's done */
    2057       46348 :     return rbuf;
    2058             : }
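
/*
 * Editorial sketch, not part of nbtinsert.c: a toy, self-contained model of
 * the item-distribution loop in _bt_split() above.  "Pages" are plain int
 * arrays and offsets are 1-based ints; firstrightoff, newitemoff, and
 * newitemonleft play the same roles as in the real function.  The
 * posting-list replacement case is omitted, and every name below is
 * hypothetical.
 */
#include <assert.h>

static void
toy_distribute_items(const int *items, int nitems, int firstrightoff,
                     int newitem, int newitemoff, int newitemonleft,
                     int *left, int *nleft, int *right, int *nright)
{
    int         i;

    *nleft = *nright = 0;
    for (i = 1; i <= nitems; i++)
    {
        /* does the new item belong immediately before this one? */
        if (i == newitemoff)
        {
            if (newitemonleft)
                left[(*nleft)++] = newitem;
            else
                right[(*nright)++] = newitem;
        }
        /* decide which page the original item goes on */
        if (i < firstrightoff)
            left[(*nleft)++] = items[i - 1];
        else
            right[(*nright)++] = items[i - 1];
    }
    /* handle the case where newitem goes at the end of the right page */
    if (i <= newitemoff)
    {
        assert(!newitemonleft && newitemoff == nitems + 1);
        right[(*nright)++] = newitem;
    }
}

/*
 * Editorial sketch of the WAL decision near the end of _bt_split(): which
 * tuple image, if any, is attached to the left page's buffer data.  The
 * xlrec_postingoff parameter stands in for xlrec.postingoff.
 */
static const char *
toy_split_wal_choice(int newitemonleft, int xlrec_postingoff)
{
    if (newitemonleft && xlrec_postingoff == 0)
        return "log newitem";           /* newitem went on the left page */
    if (xlrec_postingoff != 0)
        return "log orignewitem";       /* REDO redoes the posting split */
    return "log nothing extra";         /* newitem is in right-page image */
}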
    2059             : 
    2060             : /*
    2061             :  * _bt_insert_parent() -- Insert downlink into parent, completing split.
    2062             :  *
    2063             :  * On entry, buf and rbuf are the left and right split pages, which we
    2064             :  * still hold write locks on.  Both locks will be released here.  We
    2065             :  * release the rbuf lock once we have a write lock on the page that we
    2066             :  * intend to insert a downlink to rbuf on (i.e. buf's current parent page).
    2067             :  * The lock on buf is released at the same point as the lock on the parent
    2068             :  * page, since buf's INCOMPLETE_SPLIT flag must be cleared by the same
    2069             :  * atomic operation that completes the split by inserting a new downlink.
    2070             :  *
    2071             :  * stack - stack showing how we got here.  Will be NULL when splitting the
    2072             :  *          true root, or during a concurrent root split, where we can be inefficient
    2073             :  * is_root - we split the true root
    2074             :  * is_only - we split a page alone on its level (might have been fast root)
    2075             :  */
    2076             : static void
    2077       46348 : _bt_insert_parent(Relation rel,
    2078             :                   Buffer buf,
    2079             :                   Buffer rbuf,
    2080             :                   BTStack stack,
    2081             :                   bool is_root,
    2082             :                   bool is_only)
    2083             : {
    2084             :     /*
    2085             :      * Here we have to do something Lehman and Yao don't talk about: deal with
    2086             :      * a root split and construction of a new root.  If our stack is empty
    2087             :      * then we have just split a node on what had been the root level when we
    2088             :      * descended the tree.  If it was still the root then we perform a
    2089             :      * new-root construction.  If it *wasn't* the root anymore, search to find
    2090             :      * the next higher level that someone constructed meanwhile, and find the
    2091             :      * right place to insert as for the normal case.
    2092             :      *
    2093             :      * If we have to search for the parent level, we do so by re-descending
    2094             :      * from the root.  This is not super-efficient, but it's rare enough not
    2095             :      * to matter.
    2096             :      */
    2097       46348 :     if (is_root)
    2098             :     {
    2099             :         Buffer      rootbuf;
    2100             : 
    2101             :         Assert(stack == NULL);
    2102             :         Assert(is_only);
    2103             :         /* create a new root node and update the metapage */
    2104        2410 :         rootbuf = _bt_newroot(rel, buf, rbuf);
    2105             :         /* release the split buffers */
    2106        2410 :         _bt_relbuf(rel, rootbuf);
    2107        2410 :         _bt_relbuf(rel, rbuf);
    2108        2410 :         _bt_relbuf(rel, buf);
    2109             :     }
    2110             :     else
    2111             :     {
    2112       43938 :         BlockNumber bknum = BufferGetBlockNumber(buf);
    2113       43938 :         BlockNumber rbknum = BufferGetBlockNumber(rbuf);
    2114       43938 :         Page        page = BufferGetPage(buf);
    2115             :         IndexTuple  new_item;
    2116             :         BTStackData fakestack;
    2117             :         IndexTuple  ritem;
    2118             :         Buffer      pbuf;
    2119             : 
    2120       43938 :         if (stack == NULL)
    2121             :         {
    2122             :             BTPageOpaque lpageop;
    2123             : 
    2124           6 :             elog(DEBUG2, "concurrent ROOT page split");
    2125           6 :             lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
    2126             : 
    2127             :             /*
    2128             :              * We should never reach here when a leaf page split occurs even
    2129             :              * though the insert of newitem could have used the fastpath
    2130             :              * optimization.  Make sure of that with an assertion.
    2131             :              *
    2132             :              * This is more of a performance issue than a correctness issue.
    2133             :              * The fastpath won't have a descent stack.  Using a phony stack
    2134             :              * here works, but never rely on that.  The fastpath should be
    2135             :              * rejected within _bt_search_insert() when the rightmost leaf
    2136             :              * page will split, since it's faster to go through _bt_search()
    2137             :              * and get a stack in the usual way.
    2138             :              */
    2139             :             Assert(!(P_ISLEAF(lpageop) &&
    2140             :                      BlockNumberIsValid(RelationGetTargetBlock(rel))));
    2141             : 
    2142             :             /* Find the leftmost page at the next level up */
    2143           6 :             pbuf = _bt_get_endpoint(rel, lpageop->btpo.level + 1, false,
    2144             :                                     NULL);
    2145             :             /* Set up a phony stack entry pointing there */
    2146           6 :             stack = &fakestack;
    2147           6 :             stack->bts_blkno = BufferGetBlockNumber(pbuf);
    2148           6 :             stack->bts_offset = InvalidOffsetNumber;
    2149           6 :             stack->bts_parent = NULL;
    2150           6 :             _bt_relbuf(rel, pbuf);
    2151             :         }
    2152             : 
    2153             :         /* get high key from left, a strict lower bound for new right page */
    2154       43938 :         ritem = (IndexTuple) PageGetItem(page,
    2155             :                                          PageGetItemId(page, P_HIKEY));
    2156             : 
    2157             :         /* form an index tuple that points at the new right page */
    2158       43938 :         new_item = CopyIndexTuple(ritem);
    2159       43938 :         BTreeTupleSetDownLink(new_item, rbknum);
    2160             : 
    2161             :         /*
    2162             :          * Re-find and write lock the parent of buf.
    2163             :          *
    2164             :          * It's possible that the location of buf's downlink has changed since
    2165             :          * our initial _bt_search() descent.  _bt_getstackbuf() will detect
    2166             :          * and recover from this, updating the stack, which ensures that the
    2167             :          * new downlink will be inserted at the correct offset. Even buf's
    2168             :          * parent may have changed.
    2169             :          */
    2170       43938 :         pbuf = _bt_getstackbuf(rel, stack, bknum);
    2171             : 
    2172             :         /*
    2173             :          * Unlock the right child.  The left child will be unlocked in
    2174             :          * _bt_insertonpg().
    2175             :          *
    2176             :          * Unlocking the right child must be delayed until here to ensure that
    2177             :          * no concurrent VACUUM operation can become confused.  Page deletion
    2178             :          * cannot be allowed to fail to re-find a downlink for the rbuf page.
    2179             :          * (Actually, this is just a vestige of how things used to work.  The
    2180             :          * page deletion code is expected to check for the INCOMPLETE_SPLIT
    2181             :          * flag on the left child.  It won't attempt deletion of the right
    2182             :          * child until the split is complete.  Despite all this, we opt to
    2183             :          * conservatively delay unlocking the right child until here.)
    2184             :          */
    2185       43938 :         _bt_relbuf(rel, rbuf);
    2186             : 
    2187       43938 :         if (pbuf == InvalidBuffer)
    2188           0 :             ereport(ERROR,
    2189             :                     (errcode(ERRCODE_INDEX_CORRUPTED),
    2190             :                      errmsg_internal("failed to re-find parent key in index \"%s\" for split pages %u/%u",
    2191             :                                      RelationGetRelationName(rel), bknum, rbknum)));
    2192             : 
    2193             :         /* Recursively insert into the parent */
    2194      131814 :         _bt_insertonpg(rel, NULL, pbuf, buf, stack->bts_parent,
    2195       43938 :                        new_item, MAXALIGN(IndexTupleSize(new_item)),
    2196       43938 :                        stack->bts_offset + 1, 0, is_only);
    2197             : 
    2198             :         /* be tidy */
    2199       43938 :         pfree(new_item);
    2200             :     }
    2201       46348 : }
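
/*
 * Editorial sketch, not part of nbtinsert.c: the downlink that
 * _bt_insert_parent() inserts into the parent pairs the left page's high
 * key (a strict lower bound for every key on the new right page) with the
 * right page's block number.  ToyDownlink and both function names are
 * hypothetical.
 */
typedef struct ToyDownlink
{
    int         sepkey;         /* separator: copy of left page's high key */
    unsigned    childblkno;     /* block number of the new right page */
} ToyDownlink;

static ToyDownlink
toy_form_downlink(int lefthighkey, unsigned rightblkno)
{
    ToyDownlink downlink;

    downlink.sepkey = lefthighkey;      /* analogue of CopyIndexTuple() */
    downlink.childblkno = rightblkno;   /* analogue of BTreeTupleSetDownLink() */
    return downlink;
}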
    2202             : 
    2203             : /*
    2204             :  * _bt_finish_split() -- Finish an incomplete split
    2205             :  *
    2206             :  * A crash or other failure can leave a split incomplete.  The insertion
    2207             :  * routines won't allow an insertion on a page that is incompletely split.
    2208             :  * Before inserting on such a page, call _bt_finish_split().
    2209             :  *
    2210             :  * On entry, 'lbuf' must be locked in write-mode.  On exit, it is unlocked
    2211             :  * and unpinned.
    2212             :  */
    2213             : void
    2214           0 : _bt_finish_split(Relation rel, Buffer lbuf, BTStack stack)
    2215             : {
    2216           0 :     Page        lpage = BufferGetPage(lbuf);
    2217           0 :     BTPageOpaque lpageop = (BTPageOpaque) PageGetSpecialPointer(lpage);
    2218             :     Buffer      rbuf;
    2219             :     Page        rpage;
    2220             :     BTPageOpaque rpageop;
    2221             :     bool        was_root;
    2222             :     bool        was_only;
    2223             : 
    2224             :     Assert(P_INCOMPLETE_SPLIT(lpageop));
    2225             : 
    2226             :     /* Lock right sibling, the one missing the downlink */
    2227           0 :     rbuf = _bt_getbuf(rel, lpageop->btpo_next, BT_WRITE);
    2228           0 :     rpage = BufferGetPage(rbuf);
    2229           0 :     rpageop = (BTPageOpaque) PageGetSpecialPointer(rpage);
    2230             : 
    2231             :     /* Could this be a root split? */
    2232           0 :     if (!stack)
    2233             :     {
    2234             :         Buffer      metabuf;
    2235             :         Page        metapg;
    2236             :         BTMetaPageData *metad;
    2237             : 
    2238             :         /* acquire lock on the metapage */
    2239           0 :         metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
    2240           0 :         metapg = BufferGetPage(metabuf);
    2241           0 :         metad = BTPageGetMeta(metapg);
    2242             : 
    2243           0 :         was_root = (metad->btm_root == BufferGetBlockNumber(lbuf));
    2244             : 
    2245           0 :         _bt_relbuf(rel, metabuf);
    2246             :     }
    2247             :     else
    2248           0 :         was_root = false;
    2249             : 
    2250             :     /* Was this the only page on the level before split? */
    2251           0 :     was_only = (P_LEFTMOST(lpageop) && P_RIGHTMOST(rpageop));
    2252             : 
    2253           0 :     elog(DEBUG1, "finishing incomplete split of %u/%u",
    2254             :          BufferGetBlockNumber(lbuf), BufferGetBlockNumber(rbuf));
    2255             : 
    2256           0 :     _bt_insert_parent(rel, lbuf, rbuf, stack, was_root, was_only);
    2257           0 : }
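
/*
 * Editorial sketch, not part of nbtinsert.c: how _bt_finish_split() decides
 * was_root and was_only.  The boolean int parameters are hypothetical
 * stand-ins for the metapage comparison and the P_LEFTMOST/P_RIGHTMOST
 * checks above.
 */
static void
toy_classify_split(int have_stack, int left_is_meta_root,
                   int left_is_leftmost, int right_is_rightmost,
                   int *was_root, int *was_only)
{
    /* only a stackless split could have been a root split */
    *was_root = (!have_stack && left_is_meta_root);
    /* the pair spans the whole level iff it is leftmost and rightmost */
    *was_only = (left_is_leftmost && right_is_rightmost);
}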
    2258             : 
    2259             : /*
    2260             :  *  _bt_getstackbuf() -- Walk back up the tree one step, and find the pivot
    2261             :  *                       tuple whose downlink points to child page.
    2262             :  *
    2263             :  *      Caller passes the child's block number, which is used to identify
    2264             :  *      the associated pivot tuple in the parent page using a linear search
    2265             :  *      that matches on the pivot's downlink/block number.  The expected
    2266             :  *      location of the pivot tuple is taken from the stack one level above
    2267             :  *      the child page, and is used as a starting point.  Insertions into
    2268             :  *      the parent level could cause the pivot tuple to move right;
    2269             :  *      deletions could cause it to move left, but not left of the page we
    2270             :  *      previously found it on.
    2271             :  *
    2272             :  *      Caller can use its stack to relocate the pivot tuple/downlink for
    2273             :  *      any same-level page to the right of the page found by its initial
    2274             :  *      descent.  This is necessary because of the possibility that caller
    2275             :  *      moved right to recover from a concurrent page split.  It's also
    2276             :  *      convenient for certain callers to be able to step right when there
    2277             :  *      wasn't a concurrent page split, while still using their original
    2278             :  *      stack.  For example, the checkingunique _bt_doinsert() case may
    2279             :  *      have to step right when there are many physical duplicates, and its
    2280             :  *      scantid forces an insertion to the right of the "first page the
    2281             :  *      value could be on".  (This is also relied on by all of our callers
    2282             :  *      when dealing with !heapkeyspace indexes.)
    2283             :  *
    2284             :  *      Returns write-locked parent page buffer, or InvalidBuffer if pivot
    2285             :  *      tuple not found (should not happen).  Adjusts bts_blkno &
    2286             :  *      bts_offset if changed.  Page split caller should insert its new
    2287             :  *      pivot tuple for its new right sibling page on parent page, at the
    2288             :  *      offset number bts_offset + 1.
    2289             :  */
    2290             : Buffer
    2291       48072 : _bt_getstackbuf(Relation rel, BTStack stack, BlockNumber child)
    2292             : {
    2293             :     BlockNumber blkno;
    2294             :     OffsetNumber start;
    2295             : 
    2296       48072 :     blkno = stack->bts_blkno;
    2297       48072 :     start = stack->bts_offset;
    2298             : 
    2299             :     for (;;)
    2300           6 :     {
    2301             :         Buffer      buf;
    2302             :         Page        page;
    2303             :         BTPageOpaque opaque;
    2304             : 
    2305       48078 :         buf = _bt_getbuf(rel, blkno, BT_WRITE);
    2306       48078 :         page = BufferGetPage(buf);
    2307       48078 :         opaque = (BTPageOpaque) PageGetSpecialPointer(page);
    2308             : 
    2309       48078 :         if (P_INCOMPLETE_SPLIT(opaque))
    2310             :         {
    2311           0 :             _bt_finish_split(rel, buf, stack->bts_parent);
    2312           0 :             continue;
    2313             :         }
    2314             : 
    2315       48078 :         if (!P_IGNORE(opaque))
    2316             :         {
    2317             :             OffsetNumber offnum,
    2318             :                         minoff,
    2319             :                         maxoff;
    2320             :             ItemId      itemid;
    2321             :             IndexTuple  item;
    2322             : 
    2323       48078 :             minoff = P_FIRSTDATAKEY(opaque);
    2324       48078 :             maxoff = PageGetMaxOffsetNumber(page);
    2325             : 
    2326             :             /*
    2327             :              * start = InvalidOffsetNumber means "search the whole page". We
    2328             :              * need this test anyway due to the possibility that the page
    2329             :              * has a high key now when it didn't before.
    2330             :              */
    2331       48078 :             if (start < minoff)
    2332          12 :                 start = minoff;
    2333             : 
    2334             :             /*
    2335             :              * Need this check too, to guard against the possibility that
    2336             :              * the page split since we visited it originally.
    2337             :              */
    2338       48078 :             if (start > maxoff)
    2339           6 :                 start = OffsetNumberNext(maxoff);
    2340             : 
    2341             :             /*
    2342             :              * These loops will check every item on the page --- but in an
    2343             :              * order that's attuned to the probability of where it actually
    2344             :              * is.  Scan to the right first, then to the left.
    2345             :              */
    2346       48756 :             for (offnum = start;
    2347             :                  offnum <= maxoff;
    2348         678 :                  offnum = OffsetNumberNext(offnum))
    2349             :             {
    2350       48750 :                 itemid = PageGetItemId(page, offnum);
    2351       48750 :                 item = (IndexTuple) PageGetItem(page, itemid);
    2352             : 
    2353       48750 :                 if (BTreeTupleGetDownLink(item) == child)
    2354             :                 {
    2355             :                     /* Return accurate pointer to where link is now */
    2356       48072 :                     stack->bts_blkno = blkno;
    2357       48072 :                     stack->bts_offset = offnum;
    2358       48072 :                     return buf;
    2359             :                 }
    2360             :             }
    2361             : 
    2362        1448 :             for (offnum = OffsetNumberPrev(start);
    2363             :                  offnum >= minoff;
    2364        1442 :                  offnum = OffsetNumberPrev(offnum))
    2365             :             {
    2366        1442 :                 itemid = PageGetItemId(page, offnum);
    2367        1442 :                 item = (IndexTuple) PageGetItem(page, itemid);
    2368             : 
    2369        1442 :                 if (BTreeTupleGetDownLink(item) == child)
    2370             :                 {
    2371             :                     /* Return accurate pointer to where link is now */
    2372           0 :                     stack->bts_blkno = blkno;
    2373           0 :                     stack->bts_offset = offnum;
    2374           0 :                     return buf;
    2375             :                 }
    2376             :             }
    2377             :         }
    2378             : 
    2379             :         /*
    2380             :          * The item we're looking for moved right at least one page.
    2381             :          *
    2382             :          * Lehman and Yao couple/chain locks when moving right here, which we
    2383             :          * can avoid.  See nbtree/README.
    2384             :          */
    2385           6 :         if (P_RIGHTMOST(opaque))
    2386             :         {
    2387           0 :             _bt_relbuf(rel, buf);
    2388           0 :             return InvalidBuffer;
    2389             :         }
    2390           6 :         blkno = opaque->btpo_next;
    2391           6 :         start = InvalidOffsetNumber;
    2392           6 :         _bt_relbuf(rel, buf);
    2393             :     }
    2394             : }
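
/*
 * Editorial sketch, not part of nbtinsert.c: the search order used by
 * _bt_getstackbuf(), over a toy "page" that is just an array of downlink
 * block numbers indexed by 1-based offset.  Returns the offset whose
 * downlink matches child, or -1 when the caller must move right to the
 * next page.  All names are hypothetical.
 */
static int
toy_find_downlink(const unsigned *downlinks, int minoff, int maxoff,
                  int start, unsigned child)
{
    int         offnum;

    /* clamp start, since the page may have changed since we saw it */
    if (start < minoff)
        start = minoff;
    if (start > maxoff)
        start = maxoff + 1;

    /* scan to the right first: insertions move pivot tuples right */
    for (offnum = start; offnum <= maxoff; offnum++)
        if (downlinks[offnum] == child)
            return offnum;

    /* then to the left: deletions can move pivot tuples left */
    for (offnum = start - 1; offnum >= minoff; offnum--)
        if (downlinks[offnum] == child)
            return offnum;

    return -1;
}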
    2395             : 
    2396             : /*
    2397             :  *  _bt_newroot() -- Create a new root page for the index.
    2398             :  *
    2399             :  *      We've just split the old root page and need to create a new one.
    2400             :  *      In order to do this, we add a new root page to the file, then lock
    2401             :  *      the metadata page and update it.  This is guaranteed to be deadlock-
    2402             :  *      free, because all readers release their locks on the metadata page
    2403             :  *      before trying to lock the root, and all writers lock the root before
    2404             :  *      trying to lock the metadata page.  We have a write lock on the old
    2405             :  *      root page, so we have not introduced any cycles into the waits-for
    2406             :  *      graph.
    2407             :  *
    2408             :  *      On entry, lbuf (the old root) and rbuf (its new peer) are write-
    2409             :  *      locked. On exit, a new root page exists with entries for the
    2410             :  *      two new children, metapage is updated and unlocked/unpinned.
    2411             :  *      The new root buffer is returned to the caller, which must unlock/unpin
    2412             :  *      lbuf, rbuf & rootbuf.
    2413             :  */
    2414             : static Buffer
    2415        2410 : _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
    2416             : {
    2417             :     Buffer      rootbuf;
    2418             :     Page        lpage,
    2419             :                 rootpage;
    2420             :     BlockNumber lbkno,
    2421             :                 rbkno;
    2422             :     BlockNumber rootblknum;
    2423             :     BTPageOpaque rootopaque;
    2424             :     BTPageOpaque lopaque;
    2425             :     ItemId      itemid;
    2426             :     IndexTuple  item;
    2427             :     IndexTuple  left_item;
    2428             :     Size        left_item_sz;
    2429             :     IndexTuple  right_item;
    2430             :     Size        right_item_sz;
    2431             :     Buffer      metabuf;
    2432             :     Page        metapg;
    2433             :     BTMetaPageData *metad;
    2434             : 
    2435        2410 :     lbkno = BufferGetBlockNumber(lbuf);
    2436        2410 :     rbkno = BufferGetBlockNumber(rbuf);
    2437        2410 :     lpage = BufferGetPage(lbuf);
    2438        2410 :     lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
    2439             : 
    2440             :     /* get a new root page */
    2441        2410 :     rootbuf = _bt_getbuf(rel, P_NEW, BT_WRITE);
    2442        2410 :     rootpage = BufferGetPage(rootbuf);
    2443        2410 :     rootblknum = BufferGetBlockNumber(rootbuf);
    2444             : 
    2445             :     /* acquire lock on the metapage */
    2446        2410 :     metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
    2447        2410 :     metapg = BufferGetPage(metabuf);
    2448        2410 :     metad = BTPageGetMeta(metapg);
    2449             : 
    2450             :     /*
    2451             :      * Create downlink item for left page (old root).  The key value used is
    2452             :      * "minus infinity", a sentinel value that's reliably less than any real
    2453             :      * key value that could appear in the left page.
    2454             :      */
    2455        2410 :     left_item_sz = sizeof(IndexTupleData);
    2456        2410 :     left_item = (IndexTuple) palloc(left_item_sz);
    2457        2410 :     left_item->t_info = left_item_sz;
    2458        2410 :     BTreeTupleSetDownLink(left_item, lbkno);
    2459        2410 :     BTreeTupleSetNAtts(left_item, 0, false);
    2460             : 
    2461             :     /*
    2462             :      * Create downlink item for right page.  The key for it is obtained from
    2463             :      * the "high key" position in the left page.
    2464             :      */
    2465        2410 :     itemid = PageGetItemId(lpage, P_HIKEY);
    2466        2410 :     right_item_sz = ItemIdGetLength(itemid);
    2467        2410 :     item = (IndexTuple) PageGetItem(lpage, itemid);
    2468        2410 :     right_item = CopyIndexTuple(item);
    2469        2410 :     BTreeTupleSetDownLink(right_item, rbkno);
    2470             : 
    2471             :     /* NO EREPORT(ERROR) from here till newroot op is logged */
    2472        2410 :     START_CRIT_SECTION();
    2473             : 
    2474             :     /* upgrade metapage if needed */
    2475        2410 :     if (metad->btm_version < BTREE_NOVAC_VERSION)
    2476           0 :         _bt_upgrademetapage(metapg);
    2477             : 
    2478             :     /* set btree special data */
    2479        2410 :     rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
    2480        2410 :     rootopaque->btpo_prev = rootopaque->btpo_next = P_NONE;
    2481        2410 :     rootopaque->btpo_flags = BTP_ROOT;
    2482        2410 :     rootopaque->btpo.level =
    2483        2410 :         ((BTPageOpaque) PageGetSpecialPointer(lpage))->btpo.level + 1;
    2484        2410 :     rootopaque->btpo_cycleid = 0;
    2485             : 
    2486             :     /* update metapage data */
    2487        2410 :     metad->btm_root = rootblknum;
    2488        2410 :     metad->btm_level = rootopaque->btpo.level;
    2489        2410 :     metad->btm_fastroot = rootblknum;
    2490        2410 :     metad->btm_fastlevel = rootopaque->btpo.level;
    2491             : 
    2492             :     /*
    2493             :      * Insert the left page pointer into the new root page.  The root page is
    2494             :      * the rightmost page on its level so there is no "high key" in it; the
    2495             :      * two items will go into positions P_HIKEY and P_FIRSTKEY.
    2496             :      *
    2497             :      * Note: we *must* insert the two items in item-number order, for the
    2498             :      * benefit of _bt_restore_page().
    2499             :      */
    2500             :     Assert(BTreeTupleGetNAtts(left_item, rel) == 0);
    2501        2410 :     if (PageAddItem(rootpage, (Item) left_item, left_item_sz, P_HIKEY,
    2502             :                     false, false) == InvalidOffsetNumber)
    2503           0 :         elog(PANIC, "failed to add leftkey to new root page"
    2504             :              " while splitting block %u of index \"%s\"",
    2505             :              BufferGetBlockNumber(lbuf), RelationGetRelationName(rel));
    2506             : 
    2507             :     /*
    2508             :      * Insert the right page pointer into the new root page.
    2509             :      */
    2510             :     Assert(BTreeTupleGetNAtts(right_item, rel) > 0);
    2511             :     Assert(BTreeTupleGetNAtts(right_item, rel) <=
    2512             :            IndexRelationGetNumberOfKeyAttributes(rel));
    2513        2410 :     if (PageAddItem(rootpage, (Item) right_item, right_item_sz, P_FIRSTKEY,
    2514             :                     false, false) == InvalidOffsetNumber)
    2515           0 :         elog(PANIC, "failed to add rightkey to new root page"
    2516             :              " while splitting block %u of index \"%s\"",
    2517             :              BufferGetBlockNumber(lbuf), RelationGetRelationName(rel));
    2518             : 
    2519             :     /* Clear the incomplete-split flag in the left child */
    2520             :     Assert(P_INCOMPLETE_SPLIT(lopaque));
    2521        2410 :     lopaque->btpo_flags &= ~BTP_INCOMPLETE_SPLIT;
    2522        2410 :     MarkBufferDirty(lbuf);
    2523             : 
    2524        2410 :     MarkBufferDirty(rootbuf);
    2525        2410 :     MarkBufferDirty(metabuf);
    2526             : 
    2527             :     /* XLOG stuff */
    2528        2410 :     if (RelationNeedsWAL(rel))
    2529             :     {
    2530             :         xl_btree_newroot xlrec;
    2531             :         XLogRecPtr  recptr;
    2532             :         xl_btree_metadata md;
    2533             : 
    2534        2396 :         xlrec.rootblk = rootblknum;
    2535        2396 :         xlrec.level = metad->btm_level;
    2536             : 
    2537        2396 :         XLogBeginInsert();
    2538        2396 :         XLogRegisterData((char *) &xlrec, SizeOfBtreeNewroot);
    2539             : 
    2540        2396 :         XLogRegisterBuffer(0, rootbuf, REGBUF_WILL_INIT);
    2541        2396 :         XLogRegisterBuffer(1, lbuf, REGBUF_STANDARD);
    2542        2396 :         XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
    2543             : 
    2544             :         Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
    2545        2396 :         md.version = metad->btm_version;
    2546        2396 :         md.root = rootblknum;
    2547        2396 :         md.level = metad->btm_level;
    2548        2396 :         md.fastroot = rootblknum;
    2549        2396 :         md.fastlevel = metad->btm_level;
    2550        2396 :         md.oldest_btpo_xact = metad->btm_oldest_btpo_xact;
    2551        2396 :         md.last_cleanup_num_heap_tuples = metad->btm_last_cleanup_num_heap_tuples;
    2552        2396 :         md.allequalimage = metad->btm_allequalimage;
    2553             : 
    2554        2396 :         XLogRegisterBufData(2, (char *) &md, sizeof(xl_btree_metadata));
    2555             : 
    2556             :         /*
    2557             :          * Direct access to the page is not good style, but it is faster; we
    2558             :          * should add a new function to the page API.
    2559             :          */
    2560        4792 :         XLogRegisterBufData(0,
    2561        2396 :                             (char *) rootpage + ((PageHeader) rootpage)->pd_upper,
    2562        2396 :                             ((PageHeader) rootpage)->pd_special -
    2563        2396 :                             ((PageHeader) rootpage)->pd_upper);
    2564             : 
    2565        2396 :         recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT);
    2566             : 
    2567        2396 :         PageSetLSN(lpage, recptr);
    2568        2396 :         PageSetLSN(rootpage, recptr);
    2569        2396 :         PageSetLSN(metapg, recptr);
    2570             :     }
    2571             : 
    2572        2410 :     END_CRIT_SECTION();
    2573             : 
    2574             :     /* done with metapage */
    2575        2410 :     _bt_relbuf(rel, metabuf);
    2576             : 
    2577        2410 :     pfree(left_item);
    2578        2410 :     pfree(right_item);
    2579             : 
    2580        2410 :     return rootbuf;
    2581             : }
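
/*
 * Editorial sketch, not part of nbtinsert.c: the two items that end up on a
 * new root page, in item-number order.  ToyNewRoot is hypothetical; on a
 * real page these are pivot index tuples at positions P_HIKEY and
 * P_FIRSTKEY respectively.
 */
typedef struct ToyNewRoot
{
    unsigned    leftchild;      /* offset 1: "minus infinity" downlink to
                                 * the old root; zero key attributes, so it
                                 * sorts below every real key */
    int         sepkey;         /* offset 2: copy of left page's high key */
    unsigned    rightchild;     /* offset 2: downlink to new right sibling */
} ToyNewRoot;

static ToyNewRoot
toy_build_new_root(unsigned lbkno, unsigned rbkno, int lefthighkey)
{
    ToyNewRoot  root;

    root.leftchild = lbkno;
    root.sepkey = lefthighkey;
    root.rightchild = rbkno;
    return root;
}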
    2582             : 
    2583             : /*
    2584             :  *  _bt_pgaddtup() -- add a data item to a particular page during split.
    2585             :  *
    2586             :  *      The difference between this routine and a bare PageAddItem call is
    2587             :  *      that this code can deal with the first data item on an internal btree
    2588             :  *      page in passing.  This data item (which is called "firstright" within
    2589             :  *      _bt_split()) has a key that must be treated as minus infinity after
    2590             :  *      the split.  Therefore, we truncate away all attributes when the
    2591             :  *      caller specifies it's the first data item on the page (the downlink
    2592             :  *      is not changed, though).  This extra step is only needed for the
    2593             :  *      right page of an internal page split.  There is no need to do this
    2594             :  *      for the first data item on the existing/left page, since that will
    2595             :  *      already have been truncated during an earlier page split.
    2596             :  *
    2597             :  *      See _bt_split() for a high level explanation of why we truncate here.
    2598             :  *      Note that this routine has nothing to do with suffix truncation,
    2599             :  *      despite using some of the same infrastructure.
    2600             :  */
    2601             : static inline bool
    2602    13940140 : _bt_pgaddtup(Page page,
    2603             :              Size itemsize,
    2604             :              IndexTuple itup,
    2605             :              OffsetNumber itup_off,
    2606             :              bool newfirstdataitem)
    2607             : {
    2608             :     IndexTupleData trunctuple;
    2609             : 
    2610    13940140 :     if (newfirstdataitem)
    2611             :     {
    2612         100 :         trunctuple = *itup;
    2613         100 :         trunctuple.t_info = sizeof(IndexTupleData);
    2614         100 :         BTreeTupleSetNAtts(&trunctuple, 0, false);
    2615         100 :         itup = &trunctuple;
    2616         100 :         itemsize = sizeof(IndexTupleData);
    2617             :     }
    2618             : 
    2619    13940140 :     if (unlikely(PageAddItem(page, (Item) itup, itemsize, itup_off, false,
    2620             :                              false) == InvalidOffsetNumber))
    2621           0 :         return false;
    2622             : 
    2623    13940140 :     return true;
    2624             : }
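
/*
 * Editorial sketch, not part of nbtinsert.c: what "truncating away all
 * attributes" in _bt_pgaddtup() amounts to.  ToyTuple is a hypothetical
 * stand-in for an IndexTupleData header plus key payload; only the header
 * (and with it the downlink) survives, and zero attributes means the item
 * is treated as "minus infinity".
 */
#include <string.h>

typedef struct ToyTuple
{
    unsigned    downlink;       /* preserved: the child block pointer */
    int         natts;          /* number of key attributes; 0 = -infinity */
    char        key[16];        /* discarded by truncation */
} ToyTuple;

static ToyTuple
toy_truncate_to_minus_infinity(const ToyTuple *src)
{
    ToyTuple    trunc;

    memset(&trunc, 0, sizeof(trunc));
    trunc.downlink = src->downlink; /* the downlink is not changed */
    trunc.natts = 0;                /* no key attributes remain */
    return trunc;
}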
    2625             : 
    2626             : /*
    2627             :  * _bt_vacuum_one_page - vacuum just one index page.
    2628             :  *
    2629             :  * Try to remove LP_DEAD items from the given page.  The passed buffer
    2630             :  * must be exclusive-locked, but unlike a real VACUUM, we don't need a
    2631             :  * super-exclusive "cleanup" lock (see nbtree/README).
    2632             :  */
    2633             : static void
    2634        3692 : _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel)
    2635             : {
    2636             :     OffsetNumber deletable[MaxIndexTuplesPerPage];
    2637        3692 :     int         ndeletable = 0;
    2638             :     OffsetNumber offnum,
    2639             :                 minoff,
    2640             :                 maxoff;
    2641        3692 :     Page        page = BufferGetPage(buffer);
    2642        3692 :     BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
    2643             : 
    2644             :     Assert(P_ISLEAF(opaque));
    2645             : 
    2646             :     /*
    2647             :      * Scan over all items to see which ones need to be deleted according to
    2648             :      * LP_DEAD flags.
    2649             :      */
    2650        3692 :     minoff = P_FIRSTDATAKEY(opaque);
    2651        3692 :     maxoff = PageGetMaxOffsetNumber(page);
    2652     1010330 :     for (offnum = minoff;
    2653             :          offnum <= maxoff;
    2654     1006638 :          offnum = OffsetNumberNext(offnum))
    2655             :     {
    2656     1006638 :         ItemId      itemId = PageGetItemId(page, offnum);
    2657             : 
    2658     1006638 :         if (ItemIdIsDead(itemId))
    2659       52278 :             deletable[ndeletable++] = offnum;
    2660             :     }
    2661             : 
    2662        3692 :     if (ndeletable > 0)
    2663        3692 :         _bt_delitems_delete(rel, buffer, deletable, ndeletable, heapRel);
    2664             : 
    2665             :     /*
    2666             :      * Note: if we didn't find any LP_DEAD items, then the page's
    2667             :      * BTP_HAS_GARBAGE hint bit is falsely set.  We do not bother expending a
    2668             :      * separate write to clear it, however.  We will clear it when we split
    2669             :      * the page, or when deduplication runs.
    2670             :      */
    2671        3692 : }
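
/*
 * Editorial sketch, not part of nbtinsert.c: the collect-then-delete shape
 * of _bt_vacuum_one_page(), over a toy page where isdead[] flags the
 * LP_DEAD line pointers.  Returns the number of offsets written to
 * deletable[]; the caller would then delete them in a single batch, as
 * _bt_delitems_delete() does above.  All names are hypothetical.
 */
static int
toy_collect_deletable(const int *isdead, int minoff, int maxoff,
                      int *deletable)
{
    int         ndeletable = 0;
    int         offnum;

    for (offnum = minoff; offnum <= maxoff; offnum++)
    {
        if (isdead[offnum])
            deletable[ndeletable++] = offnum;
    }

    return ndeletable;
}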

Generated by: LCOV version 1.13