LCOV - code coverage report
Current view: top level - src/backend/access/nbtree - nbtpage.c (source / functions) Coverage Total Hit
Test: PostgreSQL 19devel Lines: 89.1 % 834 743
Test Date: 2026-02-28 13:14:45 Functions: 97.0 % 33 32
Legend: Lines:     hit not hit

            Line data    Source code
       1              : /*-------------------------------------------------------------------------
       2              :  *
       3              :  * nbtpage.c
       4              :  *    BTree-specific page management code for the Postgres btree access
       5              :  *    method.
       6              :  *
       7              :  * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
       8              :  * Portions Copyright (c) 1994, Regents of the University of California
       9              :  *
      10              :  *
      11              :  * IDENTIFICATION
      12              :  *    src/backend/access/nbtree/nbtpage.c
      13              :  *
      14              :  *  NOTES
      15              :  *     Postgres btree pages look like ordinary relation pages.  The opaque
      16              :  *     data at high addresses includes pointers to left and right siblings
      17              :  *     and flag data describing page state.  The first page in a btree, page
      18              :  *     zero, is special -- it stores meta-information describing the tree.
      19              :  *     Pages one and higher store the actual tree data.
      20              :  *
      21              :  *-------------------------------------------------------------------------
      22              :  */
      23              : #include "postgres.h"
      24              : 
      25              : #include "access/nbtree.h"
      26              : #include "access/nbtxlog.h"
      27              : #include "access/tableam.h"
      28              : #include "access/transam.h"
      29              : #include "access/xlog.h"
      30              : #include "access/xloginsert.h"
      31              : #include "common/int.h"
      32              : #include "miscadmin.h"
      33              : #include "storage/indexfsm.h"
      34              : #include "storage/predicate.h"
      35              : #include "storage/procarray.h"
      36              : #include "utils/injection_point.h"
      37              : #include "utils/memdebug.h"
      38              : #include "utils/memutils.h"
      39              : #include "utils/snapmgr.h"
      40              : 
      41              : static BTMetaPageData *_bt_getmeta(Relation rel, Buffer metabuf);
      42              : static void _bt_delitems_delete(Relation rel, Buffer buf,
      43              :                                 TransactionId snapshotConflictHorizon,
      44              :                                 bool isCatalogRel,
      45              :                                 OffsetNumber *deletable, int ndeletable,
      46              :                                 BTVacuumPosting *updatable, int nupdatable);
      47              : static char *_bt_delitems_update(BTVacuumPosting *updatable, int nupdatable,
      48              :                                  OffsetNumber *updatedoffsets,
      49              :                                  Size *updatedbuflen, bool needswal);
      50              : static bool _bt_mark_page_halfdead(Relation rel, Relation heaprel,
      51              :                                    Buffer leafbuf, BTStack stack);
      52              : static bool _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf,
      53              :                                      BlockNumber scanblkno,
      54              :                                      bool *rightsib_empty,
      55              :                                      BTVacState *vstate);
      56              : static bool _bt_lock_subtree_parent(Relation rel, Relation heaprel,
      57              :                                     BlockNumber child, BTStack stack,
      58              :                                     Buffer *subtreeparent, OffsetNumber *poffset,
      59              :                                     BlockNumber *topparent,
      60              :                                     BlockNumber *topparentrightsib);
      61              : static void _bt_pendingfsm_add(BTVacState *vstate, BlockNumber target,
      62              :                                FullTransactionId safexid);
      63              : 
      64              : /*
      65              :  *  _bt_initmetapage() -- Fill a page buffer with a correct metapage image
      66              :  */
      67              : void
      68        26176 : _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level,
      69              :                  bool allequalimage)
      70              : {
      71              :     BTMetaPageData *metad;
      72              :     BTPageOpaque metaopaque;
      73              : 
      74        26176 :     _bt_pageinit(page, BLCKSZ);
      75              : 
      76        26176 :     metad = BTPageGetMeta(page);
      77        26176 :     metad->btm_magic = BTREE_MAGIC;
      78        26176 :     metad->btm_version = BTREE_VERSION;
      79        26176 :     metad->btm_root = rootbknum;
      80        26176 :     metad->btm_level = level;
      81        26176 :     metad->btm_fastroot = rootbknum;
      82        26176 :     metad->btm_fastlevel = level;
      83        26176 :     metad->btm_last_cleanup_num_delpages = 0;
      84        26176 :     metad->btm_last_cleanup_num_heap_tuples = -1.0;
      85        26176 :     metad->btm_allequalimage = allequalimage;
      86              : 
      87        26176 :     metaopaque = BTPageGetOpaque(page);
      88        26176 :     metaopaque->btpo_flags = BTP_META;
      89              : 
      90              :     /*
      91              :      * Set pd_lower just past the end of the metadata.  This is essential,
      92              :      * because without doing so, metadata will be lost if xlog.c compresses
      93              :      * the page.
      94              :      */
      95        26176 :     ((PageHeader) page)->pd_lower =
      96        26176 :         ((char *) metad + sizeof(BTMetaPageData)) - (char *) page;
      97        26176 : }
      98              : 
      99              : /*
     100              :  *  _bt_upgrademetapage() -- Upgrade a meta-page from an old format to version
     101              :  *      3, the last version that can be updated without broadly affecting
     102              :  *      on-disk compatibility.  (A REINDEX is required to upgrade to v4.)
     103              :  *
     104              :  *      This routine does purely in-memory image upgrade.  Caller is
     105              :  *      responsible for locking, WAL-logging etc.
     106              :  */
     107              : void
     108            0 : _bt_upgrademetapage(Page page)
     109              : {
     110              :     BTMetaPageData *metad;
     111              :     BTPageOpaque metaopaque PG_USED_FOR_ASSERTS_ONLY;
     112              : 
     113            0 :     metad = BTPageGetMeta(page);
     114            0 :     metaopaque = BTPageGetOpaque(page);
     115              : 
     116              :     /* It must be really a meta page of upgradable version */
     117              :     Assert(metaopaque->btpo_flags & BTP_META);
     118              :     Assert(metad->btm_version < BTREE_NOVAC_VERSION);
     119              :     Assert(metad->btm_version >= BTREE_MIN_VERSION);
     120              : 
     121              :     /* Set version number and fill extra fields added into version 3 */
     122            0 :     metad->btm_version = BTREE_NOVAC_VERSION;
     123            0 :     metad->btm_last_cleanup_num_delpages = 0;
     124            0 :     metad->btm_last_cleanup_num_heap_tuples = -1.0;
     125              :     /* Only a REINDEX can set this field */
     126              :     Assert(!metad->btm_allequalimage);
     127            0 :     metad->btm_allequalimage = false;
     128              : 
     129              :     /* Adjust pd_lower (see _bt_initmetapage() for details) */
     130            0 :     ((PageHeader) page)->pd_lower =
     131            0 :         ((char *) metad + sizeof(BTMetaPageData)) - (char *) page;
     132            0 : }
     133              : 
     134              : /*
     135              :  * Get metadata from share-locked buffer containing metapage, while performing
     136              :  * standard sanity checks.
     137              :  *
     138              :  * Callers that cache data returned here in local cache should note that an
     139              :  * on-the-fly upgrade using _bt_upgrademetapage() can change the version field
     140              :  * and BTREE_NOVAC_VERSION specific fields without invalidating local cache.
     141              :  */
     142              : static BTMetaPageData *
     143      1007696 : _bt_getmeta(Relation rel, Buffer metabuf)
     144              : {
     145              :     Page        metapg;
     146              :     BTPageOpaque metaopaque;
     147              :     BTMetaPageData *metad;
     148              : 
     149      1007696 :     metapg = BufferGetPage(metabuf);
     150      1007696 :     metaopaque = BTPageGetOpaque(metapg);
     151      1007696 :     metad = BTPageGetMeta(metapg);
     152              : 
     153              :     /* sanity-check the metapage */
     154      1007696 :     if (!P_ISMETA(metaopaque) ||
     155      1007696 :         metad->btm_magic != BTREE_MAGIC)
     156            0 :         ereport(ERROR,
     157              :                 (errcode(ERRCODE_INDEX_CORRUPTED),
     158              :                  errmsg("index \"%s\" is not a btree",
     159              :                         RelationGetRelationName(rel))));
     160              : 
     161      1007696 :     if (metad->btm_version < BTREE_MIN_VERSION ||
     162      1007696 :         metad->btm_version > BTREE_VERSION)
     163            0 :         ereport(ERROR,
     164              :                 (errcode(ERRCODE_INDEX_CORRUPTED),
     165              :                  errmsg("version mismatch in index \"%s\": file version %d, "
     166              :                         "current version %d, minimal supported version %d",
     167              :                         RelationGetRelationName(rel),
     168              :                         metad->btm_version, BTREE_VERSION, BTREE_MIN_VERSION)));
     169              : 
     170      1007696 :     return metad;
     171              : }
     172              : 
     173              : /*
     174              :  * _bt_vacuum_needs_cleanup() -- Checks if index needs cleanup
     175              :  *
     176              :  * Called by btvacuumcleanup when btbulkdelete was never called because no
     177              :  * index tuples needed to be deleted.
     178              :  */
     179              : bool
     180       153343 : _bt_vacuum_needs_cleanup(Relation rel)
     181              : {
     182              :     Buffer      metabuf;
     183              :     Page        metapg;
     184              :     BTMetaPageData *metad;
     185              :     uint32      btm_version;
     186              :     BlockNumber prev_num_delpages;
     187              : 
     188              :     /*
     189              :      * Copy details from metapage to local variables quickly.
     190              :      *
     191              :      * Note that we deliberately avoid using cached version of metapage here.
     192              :      */
     193       153343 :     metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
     194       153343 :     metapg = BufferGetPage(metabuf);
     195       153343 :     metad = BTPageGetMeta(metapg);
     196       153343 :     btm_version = metad->btm_version;
     197              : 
     198       153343 :     if (btm_version < BTREE_NOVAC_VERSION)
     199              :     {
     200              :         /*
     201              :          * Metapage needs to be dynamically upgraded to store fields that are
     202              :          * only present when btm_version >= BTREE_NOVAC_VERSION
     203              :          */
     204            0 :         _bt_relbuf(rel, metabuf);
     205            0 :         return true;
     206              :     }
     207              : 
     208       153343 :     prev_num_delpages = metad->btm_last_cleanup_num_delpages;
     209       153343 :     _bt_relbuf(rel, metabuf);
     210              : 
     211              :     /*
     212              :      * Trigger cleanup in rare cases where prev_num_delpages exceeds 5% of the
     213              :      * total size of the index.  We can reasonably expect (though are not
     214              :      * guaranteed) to be able to recycle this many pages if we decide to do a
     215              :      * btvacuumscan call during the ongoing btvacuumcleanup.  For further
     216              :      * details see the nbtree/README section on placing deleted pages in the
     217              :      * FSM.
     218              :      */
     219       153343 :     if (prev_num_delpages > 0 &&
     220            6 :         prev_num_delpages > RelationGetNumberOfBlocks(rel) / 20)
     221            6 :         return true;
     222              : 
     223       153337 :     return false;
     224              : }
     225              : 
     226              : /*
     227              :  * _bt_set_cleanup_info() -- Update metapage for btvacuumcleanup.
     228              :  *
     229              :  * Called at the end of btvacuumcleanup, when num_delpages value has been
     230              :  * finalized.
     231              :  */
     232              : void
     233         1225 : _bt_set_cleanup_info(Relation rel, BlockNumber num_delpages)
     234              : {
     235              :     Buffer      metabuf;
     236              :     Page        metapg;
     237              :     BTMetaPageData *metad;
     238              : 
     239              :     /*
     240              :      * On-disk compatibility note: The btm_last_cleanup_num_delpages metapage
     241              :      * field started out as a TransactionId field called btm_oldest_btpo_xact.
     242              :      * Both "versions" are just uint32 fields.  It was convenient to repurpose
     243              :      * the field when we began to use 64-bit XIDs in deleted pages.
     244              :      *
     245              :      * It's possible that a pg_upgrade'd database will contain an XID value in
     246              :      * what is now recognized as the metapage's btm_last_cleanup_num_delpages
     247              :      * field.  _bt_vacuum_needs_cleanup() may even believe that this value
     248              :      * indicates that there are lots of pages that it needs to recycle, when
     249              :      * in reality there are only one or two.  The worst that can happen is
     250              :      * that there will be a call to btvacuumscan a little earlier, which will
     251              :      * set btm_last_cleanup_num_delpages to a sane value when we're called.
     252              :      *
     253              :      * Note also that the metapage's btm_last_cleanup_num_heap_tuples field is
     254              :      * no longer used as of PostgreSQL 14.  We set it to -1.0 on rewrite, just
     255              :      * to be consistent.
     256              :      */
     257         1225 :     metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
     258         1225 :     metapg = BufferGetPage(metabuf);
     259         1225 :     metad = BTPageGetMeta(metapg);
     260              : 
     261              :     /* Don't miss chance to upgrade index/metapage when BTREE_MIN_VERSION */
     262         1225 :     if (metad->btm_version >= BTREE_NOVAC_VERSION &&
     263         1225 :         metad->btm_last_cleanup_num_delpages == num_delpages)
     264              :     {
     265              :         /* Usually means index continues to have num_delpages of 0 */
     266         1140 :         _bt_relbuf(rel, metabuf);
     267         1140 :         return;
     268              :     }
     269              : 
     270              :     /* trade in our read lock for a write lock */
     271           85 :     _bt_unlockbuf(rel, metabuf);
     272           85 :     _bt_lockbuf(rel, metabuf, BT_WRITE);
     273              : 
     274           85 :     START_CRIT_SECTION();
     275              : 
     276              :     /* upgrade meta-page if needed */
     277           85 :     if (metad->btm_version < BTREE_NOVAC_VERSION)
     278            0 :         _bt_upgrademetapage(metapg);
     279              : 
     280              :     /* update cleanup-related information */
     281           85 :     metad->btm_last_cleanup_num_delpages = num_delpages;
     282           85 :     metad->btm_last_cleanup_num_heap_tuples = -1.0;
     283           85 :     MarkBufferDirty(metabuf);
     284              : 
     285              :     /* write wal record if needed */
     286           85 :     if (RelationNeedsWAL(rel))
     287              :     {
     288              :         xl_btree_metadata md;
     289              :         XLogRecPtr  recptr;
     290              : 
     291           85 :         XLogBeginInsert();
     292           85 :         XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
     293              : 
     294              :         Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
     295           85 :         md.version = metad->btm_version;
     296           85 :         md.root = metad->btm_root;
     297           85 :         md.level = metad->btm_level;
     298           85 :         md.fastroot = metad->btm_fastroot;
     299           85 :         md.fastlevel = metad->btm_fastlevel;
     300           85 :         md.last_cleanup_num_delpages = num_delpages;
     301           85 :         md.allequalimage = metad->btm_allequalimage;
     302              : 
     303           85 :         XLogRegisterBufData(0, &md, sizeof(xl_btree_metadata));
     304              : 
     305           85 :         recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_META_CLEANUP);
     306              : 
     307           85 :         PageSetLSN(metapg, recptr);
     308              :     }
     309              : 
     310           85 :     END_CRIT_SECTION();
     311              : 
     312           85 :     _bt_relbuf(rel, metabuf);
     313              : }
     314              : 
     315              : /*
     316              :  *  _bt_getroot() -- Get the root page of the btree.
     317              :  *
     318              :  *      Since the root page can move around the btree file, we have to read
     319              :  *      its location from the metadata page, and then read the root page
     320              :  *      itself.  If no root page exists yet, we have to create one.
     321              :  *
     322              :  *      The access type parameter (BT_READ or BT_WRITE) controls whether
     323              :  *      a new root page will be created or not.  If access = BT_READ,
     324              :  *      and no root page exists, we just return InvalidBuffer.  For
     325              :  *      BT_WRITE, we try to create the root page if it doesn't exist.
     326              :  *      NOTE that the returned root page will have only a read lock set
     327              :  *      on it even if access = BT_WRITE!
     328              :  *
     329              :  *      If access = BT_WRITE, heaprel must be set; otherwise caller can just
     330              :  *      pass NULL.  See _bt_allocbuf for an explanation.
     331              :  *
     332              :  *      The returned page is not necessarily the true root --- it could be
     333              :  *      a "fast root" (a page that is alone in its level due to deletions).
     334              :  *      Also, if the root page is split while we are "in flight" to it,
     335              :  *      what we will return is the old root, which is now just the leftmost
     336              :  *      page on a probably-not-very-wide level.  For most purposes this is
     337              :  *      as good as or better than the true root, so we do not bother to
     338              :  *      insist on finding the true root.  We do, however, guarantee to
     339              :  *      return a live (not deleted or half-dead) page.
     340              :  *
     341              :  *      On successful return, the root page is pinned and read-locked.
     342              :  *      The metadata page is not locked or pinned on exit.
     343              :  */
     344              : Buffer
     345     12564947 : _bt_getroot(Relation rel, Relation heaprel, int access)
     346              : {
     347              :     Buffer      metabuf;
     348              :     Buffer      rootbuf;
     349              :     Page        rootpage;
     350              :     BTPageOpaque rootopaque;
     351              :     BlockNumber rootblkno;
     352              :     uint32      rootlevel;
     353              :     BTMetaPageData *metad;
     354              : 
     355              :     Assert(access == BT_READ || heaprel != NULL);
     356              : 
     357              :     /*
     358              :      * Try to use previously-cached metapage data to find the root.  This
     359              :      * normally saves one buffer access per index search, which is a very
     360              :      * helpful savings in bufmgr traffic and hence contention.
     361              :      */
     362     12564947 :     if (rel->rd_amcache != NULL)
     363              :     {
     364     12261929 :         metad = (BTMetaPageData *) rel->rd_amcache;
     365              :         /* We shouldn't have cached it if any of these fail */
     366              :         Assert(metad->btm_magic == BTREE_MAGIC);
     367              :         Assert(metad->btm_version >= BTREE_MIN_VERSION);
     368              :         Assert(metad->btm_version <= BTREE_VERSION);
     369              :         Assert(!metad->btm_allequalimage ||
     370              :                metad->btm_version > BTREE_NOVAC_VERSION);
     371              :         Assert(metad->btm_root != P_NONE);
     372              : 
     373     12261929 :         rootblkno = metad->btm_fastroot;
     374              :         Assert(rootblkno != P_NONE);
     375     12261929 :         rootlevel = metad->btm_fastlevel;
     376              : 
     377     12261929 :         rootbuf = _bt_getbuf(rel, rootblkno, BT_READ);
     378     12261929 :         rootpage = BufferGetPage(rootbuf);
     379     12261929 :         rootopaque = BTPageGetOpaque(rootpage);
     380              : 
     381              :         /*
     382              :          * Since the cache might be stale, we check the page more carefully
     383              :          * here than normal.  We *must* check that it's not deleted. If it's
     384              :          * not alone on its level, then we reject too --- this may be overly
     385              :          * paranoid but better safe than sorry.  Note we don't check P_ISROOT,
     386              :          * because that's not set in a "fast root".
     387              :          */
     388     12261929 :         if (!P_IGNORE(rootopaque) &&
     389     12261929 :             rootopaque->btpo_level == rootlevel &&
     390     12261929 :             P_LEFTMOST(rootopaque) &&
     391     12261929 :             P_RIGHTMOST(rootopaque))
     392              :         {
     393              :             /* OK, accept cached page as the root */
     394     12261115 :             return rootbuf;
     395              :         }
     396          814 :         _bt_relbuf(rel, rootbuf);
     397              :         /* Cache is stale, throw it away */
     398          814 :         if (rel->rd_amcache)
     399          814 :             pfree(rel->rd_amcache);
     400          814 :         rel->rd_amcache = NULL;
     401              :     }
     402              : 
     403       303832 :     metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
     404       303832 :     metad = _bt_getmeta(rel, metabuf);
     405              : 
     406              :     /* if no root page initialized yet, do it */
     407       303832 :     if (metad->btm_root == P_NONE)
     408              :     {
     409              :         Page        metapg;
     410              : 
     411              :         /* If access = BT_READ, caller doesn't want us to create root yet */
     412       302755 :         if (access == BT_READ)
     413              :         {
     414       296225 :             _bt_relbuf(rel, metabuf);
     415       296225 :             return InvalidBuffer;
     416              :         }
     417              : 
     418              :         /* trade in our read lock for a write lock */
     419         6530 :         _bt_unlockbuf(rel, metabuf);
     420         6530 :         _bt_lockbuf(rel, metabuf, BT_WRITE);
     421              : 
     422              :         /*
     423              :          * Race condition:  if someone else initialized the metadata between
     424              :          * the time we released the read lock and acquired the write lock, we
     425              :          * must avoid doing it again.
     426              :          */
     427         6530 :         if (metad->btm_root != P_NONE)
     428              :         {
     429              :             /*
     430              :              * Metadata initialized by someone else.  In order to guarantee no
     431              :              * deadlocks, we have to release the metadata page and start all
     432              :              * over again.  (Is that really true? But it's hardly worth trying
     433              :              * to optimize this case.)
     434              :              */
     435            0 :             _bt_relbuf(rel, metabuf);
     436            0 :             return _bt_getroot(rel, heaprel, access);
     437              :         }
     438              : 
     439              :         /*
     440              :          * Get, initialize, write, and leave a lock of the appropriate type on
     441              :          * the new root page.  Since this is the first page in the tree, it's
     442              :          * a leaf as well as the root.
     443              :          */
     444         6530 :         rootbuf = _bt_allocbuf(rel, heaprel);
     445         6530 :         rootblkno = BufferGetBlockNumber(rootbuf);
     446         6530 :         rootpage = BufferGetPage(rootbuf);
     447         6530 :         rootopaque = BTPageGetOpaque(rootpage);
     448         6530 :         rootopaque->btpo_prev = rootopaque->btpo_next = P_NONE;
     449         6530 :         rootopaque->btpo_flags = (BTP_LEAF | BTP_ROOT);
     450         6530 :         rootopaque->btpo_level = 0;
     451         6530 :         rootopaque->btpo_cycleid = 0;
     452              :         /* Get raw page pointer for metapage */
     453         6530 :         metapg = BufferGetPage(metabuf);
     454              : 
     455              :         /* NO ELOG(ERROR) till meta is updated */
     456         6530 :         START_CRIT_SECTION();
     457              : 
     458              :         /* upgrade metapage if needed */
     459         6530 :         if (metad->btm_version < BTREE_NOVAC_VERSION)
     460            0 :             _bt_upgrademetapage(metapg);
     461              : 
     462         6530 :         metad->btm_root = rootblkno;
     463         6530 :         metad->btm_level = 0;
     464         6530 :         metad->btm_fastroot = rootblkno;
     465         6530 :         metad->btm_fastlevel = 0;
     466         6530 :         metad->btm_last_cleanup_num_delpages = 0;
     467         6530 :         metad->btm_last_cleanup_num_heap_tuples = -1.0;
     468              : 
     469         6530 :         MarkBufferDirty(rootbuf);
     470         6530 :         MarkBufferDirty(metabuf);
     471              : 
     472              :         /* XLOG stuff */
     473         6530 :         if (RelationNeedsWAL(rel))
     474              :         {
     475              :             xl_btree_newroot xlrec;
     476              :             XLogRecPtr  recptr;
     477              :             xl_btree_metadata md;
     478              : 
     479         6294 :             XLogBeginInsert();
     480         6294 :             XLogRegisterBuffer(0, rootbuf, REGBUF_WILL_INIT);
     481         6294 :             XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
     482              : 
     483              :             Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
     484         6294 :             md.version = metad->btm_version;
     485         6294 :             md.root = rootblkno;
     486         6294 :             md.level = 0;
     487         6294 :             md.fastroot = rootblkno;
     488         6294 :             md.fastlevel = 0;
     489         6294 :             md.last_cleanup_num_delpages = 0;
     490         6294 :             md.allequalimage = metad->btm_allequalimage;
     491              : 
     492         6294 :             XLogRegisterBufData(2, &md, sizeof(xl_btree_metadata));
     493              : 
     494         6294 :             xlrec.rootblk = rootblkno;
     495         6294 :             xlrec.level = 0;
     496              : 
     497         6294 :             XLogRegisterData(&xlrec, SizeOfBtreeNewroot);
     498              : 
     499         6294 :             recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT);
     500              : 
     501         6294 :             PageSetLSN(rootpage, recptr);
     502         6294 :             PageSetLSN(metapg, recptr);
     503              :         }
     504              : 
     505         6530 :         END_CRIT_SECTION();
     506              : 
     507              :         /*
     508              :          * swap root write lock for read lock.  There is no danger of anyone
     509              :          * else accessing the new root page while it's unlocked, since no one
     510              :          * else knows where it is yet.
     511              :          */
     512         6530 :         _bt_unlockbuf(rel, rootbuf);
     513         6530 :         _bt_lockbuf(rel, rootbuf, BT_READ);
     514              : 
     515              :         /* okay, metadata is correct, release lock on it without caching */
     516         6530 :         _bt_relbuf(rel, metabuf);
     517              :     }
     518              :     else
     519              :     {
     520         1077 :         rootblkno = metad->btm_fastroot;
     521              :         Assert(rootblkno != P_NONE);
     522         1077 :         rootlevel = metad->btm_fastlevel;
     523              : 
     524              :         /*
     525              :          * Cache the metapage data for next time
     526              :          */
     527         1077 :         rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
     528              :                                              sizeof(BTMetaPageData));
     529         1077 :         memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData));
     530              : 
     531              :         /*
     532              :          * We are done with the metapage; arrange to release it via first
     533              :          * _bt_relandgetbuf call
     534              :          */
     535         1077 :         rootbuf = metabuf;
     536              : 
     537              :         for (;;)
     538              :         {
     539         1077 :             rootbuf = _bt_relandgetbuf(rel, rootbuf, rootblkno, BT_READ);
     540         1077 :             rootpage = BufferGetPage(rootbuf);
     541         1077 :             rootopaque = BTPageGetOpaque(rootpage);
     542              : 
     543         1077 :             if (!P_IGNORE(rootopaque))
     544         1077 :                 break;
     545              : 
     546              :             /* it's dead, Jim.  step right one page */
     547            0 :             if (P_RIGHTMOST(rootopaque))
     548            0 :                 elog(ERROR, "no live root page found in index \"%s\"",
     549              :                      RelationGetRelationName(rel));
     550            0 :             rootblkno = rootopaque->btpo_next;
     551              :         }
     552              : 
     553         1077 :         if (rootopaque->btpo_level != rootlevel)
     554            0 :             elog(ERROR, "root page %u of index \"%s\" has level %u, expected %u",
     555              :                  rootblkno, RelationGetRelationName(rel),
     556              :                  rootopaque->btpo_level, rootlevel);
     557              :     }
     558              : 
     559              :     /*
     560              :      * By here, we have a pin and read lock on the root page, and no lock set
     561              :      * on the metadata page.  Return the root page's buffer.
     562              :      */
     563         7607 :     return rootbuf;
     564              : }
     565              : 
     566              : /*
     567              :  *  _bt_gettrueroot() -- Get the true root page of the btree.
     568              :  *
     569              :  *      This is the same as the BT_READ case of _bt_getroot(), except
     570              :  *      we follow the true-root link not the fast-root link.
     571              :  *
     572              :  * By the time we acquire lock on the root page, it might have been split and
     573              :  * not be the true root anymore.  This is okay for the present uses of this
     574              :  * routine; we only really need to be able to move up at least one tree level
     575              :  * from whatever non-root page we were at.  If we ever do need to lock the
     576              :  * one true root page, we could loop here, re-reading the metapage on each
     577              :  * failure.  (Note that it wouldn't do to hold the lock on the metapage while
     578              :  * moving to the root --- that'd deadlock against any concurrent root split.)
     579              :  */
     580              : Buffer
     581           12 : _bt_gettrueroot(Relation rel)
     582              : {
     583              :     Buffer      metabuf;
     584              :     Page        metapg;
     585              :     BTPageOpaque metaopaque;
     586              :     Buffer      rootbuf;
     587              :     Page        rootpage;
     588              :     BTPageOpaque rootopaque;
     589              :     BlockNumber rootblkno;
     590              :     uint32      rootlevel;
     591              :     BTMetaPageData *metad;
     592              : 
     593              :     /*
     594              :      * We don't try to use cached metapage data here, since (a) this path is
     595              :      * not performance-critical, and (b) if we are here it suggests our cache
     596              :      * is out-of-date anyway.  In light of point (b), it's probably safest to
     597              :      * actively flush any cached metapage info.
     598              :      */
     599           12 :     if (rel->rd_amcache)
     600           12 :         pfree(rel->rd_amcache);
     601           12 :     rel->rd_amcache = NULL;
     602              : 
     603           12 :     metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
     604           12 :     metapg = BufferGetPage(metabuf);
     605           12 :     metaopaque = BTPageGetOpaque(metapg);
     606           12 :     metad = BTPageGetMeta(metapg);
     607              : 
     608           12 :     if (!P_ISMETA(metaopaque) ||
     609           12 :         metad->btm_magic != BTREE_MAGIC)
     610            0 :         ereport(ERROR,
     611              :                 (errcode(ERRCODE_INDEX_CORRUPTED),
     612              :                  errmsg("index \"%s\" is not a btree",
     613              :                         RelationGetRelationName(rel))));
     614              : 
     615           12 :     if (metad->btm_version < BTREE_MIN_VERSION ||
     616           12 :         metad->btm_version > BTREE_VERSION)
     617            0 :         ereport(ERROR,
     618              :                 (errcode(ERRCODE_INDEX_CORRUPTED),
     619              :                  errmsg("version mismatch in index \"%s\": file version %d, "
     620              :                         "current version %d, minimal supported version %d",
     621              :                         RelationGetRelationName(rel),
     622              :                         metad->btm_version, BTREE_VERSION, BTREE_MIN_VERSION)));
     623              : 
     624              :     /* if no root page initialized yet, fail */
     625           12 :     if (metad->btm_root == P_NONE)
     626              :     {
     627            0 :         _bt_relbuf(rel, metabuf);
     628            0 :         return InvalidBuffer;
     629              :     }
     630              : 
     631           12 :     rootblkno = metad->btm_root;
     632           12 :     rootlevel = metad->btm_level;
     633              : 
     634              :     /*
     635              :      * We are done with the metapage; arrange to release it via first
     636              :      * _bt_relandgetbuf call
     637              :      */
     638           12 :     rootbuf = metabuf;
     639              : 
     640              :     for (;;)
     641              :     {
     642           12 :         rootbuf = _bt_relandgetbuf(rel, rootbuf, rootblkno, BT_READ);
     643           12 :         rootpage = BufferGetPage(rootbuf);
     644           12 :         rootopaque = BTPageGetOpaque(rootpage);
     645              : 
     646           12 :         if (!P_IGNORE(rootopaque))
     647           12 :             break;
     648              : 
     649              :         /* it's dead, Jim.  step right one page */
     650            0 :         if (P_RIGHTMOST(rootopaque))
     651            0 :             elog(ERROR, "no live root page found in index \"%s\"",
     652              :                  RelationGetRelationName(rel));
     653            0 :         rootblkno = rootopaque->btpo_next;
     654              :     }
     655              : 
     656           12 :     if (rootopaque->btpo_level != rootlevel)
     657            0 :         elog(ERROR, "root page %u of index \"%s\" has level %u, expected %u",
     658              :              rootblkno, RelationGetRelationName(rel),
     659              :              rootopaque->btpo_level, rootlevel);
     660              : 
     661           12 :     return rootbuf;
     662              : }
     663              : 
     664              : /*
     665              :  *  _bt_getrootheight() -- Get the height of the btree search tree.
     666              :  *
     667              :  *      We return the level (counting from zero) of the current fast root.
     668              :  *      This represents the number of tree levels we'd have to descend through
     669              :  *      to start any btree index search.
     670              :  *
     671              :  *      This is used by the planner for cost-estimation purposes.  Since it's
     672              :  *      only an estimate, slightly-stale data is fine, hence we don't worry
     673              :  *      about updating previously cached data.
     674              :  */
     675              : int
     676      2438844 : _bt_getrootheight(Relation rel)
     677              : {
     678              :     BTMetaPageData *metad;
     679              : 
     680      2438844 :     if (rel->rd_amcache == NULL)
     681              :     {
     682              :         Buffer      metabuf;
     683              : 
     684        48921 :         metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
     685        48921 :         metad = _bt_getmeta(rel, metabuf);
     686              : 
     687              :         /*
     688              :          * If there's no root page yet, _bt_getroot() doesn't expect a cache
     689              :          * to be made, so just stop here and report the index height is zero.
     690              :          * (XXX perhaps _bt_getroot() should be changed to allow this case.)
     691              :          */
     692        48921 :         if (metad->btm_root == P_NONE)
     693              :         {
     694        23891 :             _bt_relbuf(rel, metabuf);
     695        23891 :             return 0;
     696              :         }
     697              : 
     698              :         /*
     699              :          * Cache the metapage data for next time
     700              :          */
     701        25030 :         rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
     702              :                                              sizeof(BTMetaPageData));
     703        25030 :         memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData));
     704        25030 :         _bt_relbuf(rel, metabuf);
     705              :     }
     706              : 
     707              :     /* Get cached page */
     708      2414953 :     metad = (BTMetaPageData *) rel->rd_amcache;
     709              :     /* We shouldn't have cached it if any of these fail */
     710              :     Assert(metad->btm_magic == BTREE_MAGIC);
     711              :     Assert(metad->btm_version >= BTREE_MIN_VERSION);
     712              :     Assert(metad->btm_version <= BTREE_VERSION);
     713              :     Assert(!metad->btm_allequalimage ||
     714              :            metad->btm_version > BTREE_NOVAC_VERSION);
     715              :     Assert(metad->btm_fastroot != P_NONE);
     716              : 
     717      2414953 :     return metad->btm_fastlevel;
     718              : }
     719              : 
     720              : /*
     721              :  *  _bt_metaversion() -- Get version/status info from metapage.
     722              :  *
     723              :  *      Sets caller's *heapkeyspace and *allequalimage arguments using data
     724              :  *      from the B-Tree metapage (could be locally-cached version).  This
     725              :  *      information needs to be stashed in insertion scankey, so we provide a
     726              :  *      single function that fetches both at once.
     727              :  *
     728              :  *      This is used to determine the rules that must be used to descend a
     729              :  *      btree.  Version 4 indexes treat heap TID as a tiebreaker attribute.
     730              :  *      pg_upgrade'd version 3 indexes need extra steps to preserve reasonable
     731              :  *      performance when inserting a new BTScanInsert-wise duplicate tuple
     732              :  *      among many leaf pages already full of such duplicates.
     733              :  *
     734              :  *      Also sets allequalimage field, which indicates whether or not it is
     735              :  *      safe to apply deduplication.  We rely on the assumption that
     736              :  *      btm_allequalimage will be zero'ed on heapkeyspace indexes that were
     737              :  *      pg_upgrade'd from Postgres 12.
     738              :  */
     739              : void
     740     14587204 : _bt_metaversion(Relation rel, bool *heapkeyspace, bool *allequalimage)
     741              : {
     742              :     BTMetaPageData *metad;
     743              : 
     744     14587204 :     if (rel->rd_amcache == NULL)
     745              :     {
     746              :         Buffer      metabuf;
     747              : 
     748       654943 :         metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
     749       654943 :         metad = _bt_getmeta(rel, metabuf);
     750              : 
     751              :         /*
     752              :          * If there's no root page yet, _bt_getroot() doesn't expect a cache
     753              :          * to be made, so just stop here.  (XXX perhaps _bt_getroot() should
     754              :          * be changed to allow this case.)
     755              :          */
     756       654943 :         if (metad->btm_root == P_NONE)
     757              :         {
     758       298071 :             *heapkeyspace = metad->btm_version > BTREE_NOVAC_VERSION;
     759       298071 :             *allequalimage = metad->btm_allequalimage;
     760              : 
     761       298071 :             _bt_relbuf(rel, metabuf);
     762       298071 :             return;
     763              :         }
     764              : 
     765              :         /*
     766              :          * Cache the metapage data for next time
     767              :          *
     768              :          * An on-the-fly version upgrade performed by _bt_upgrademetapage()
     769              :          * can change the nbtree version for an index without invalidating any
     770              :          * local cache.  This is okay because it can only happen when moving
     771              :          * from version 2 to version 3, both of which are !heapkeyspace
     772              :          * versions.
     773              :          */
     774       356872 :         rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
     775              :                                              sizeof(BTMetaPageData));
     776       356872 :         memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData));
     777       356872 :         _bt_relbuf(rel, metabuf);
     778              :     }
     779              : 
     780              :     /* Get cached page */
     781     14289133 :     metad = (BTMetaPageData *) rel->rd_amcache;
     782              :     /* We shouldn't have cached it if any of these fail */
     783              :     Assert(metad->btm_magic == BTREE_MAGIC);
     784              :     Assert(metad->btm_version >= BTREE_MIN_VERSION);
     785              :     Assert(metad->btm_version <= BTREE_VERSION);
     786              :     Assert(!metad->btm_allequalimage ||
     787              :            metad->btm_version > BTREE_NOVAC_VERSION);
     788              :     Assert(metad->btm_fastroot != P_NONE);
     789              : 
     790     14289133 :     *heapkeyspace = metad->btm_version > BTREE_NOVAC_VERSION;
     791     14289133 :     *allequalimage = metad->btm_allequalimage;
     792              : }
     793              : 
     794              : /*
     795              :  *  _bt_checkpage() -- Verify that a freshly-read page looks sane.
     796              :  */
     797              : void
     798     23657212 : _bt_checkpage(Relation rel, Buffer buf)
     799              : {
     800     23657212 :     Page        page = BufferGetPage(buf);
     801              : 
     802              :     /*
     803              :      * ReadBuffer verifies that every newly-read page passes
     804              :      * PageHeaderIsValid, which means it either contains a reasonably sane
     805              :      * page header or is all-zero.  We have to defend against the all-zero
     806              :      * case, however.
     807              :      */
     808     23657212 :     if (PageIsNew(page))
     809            0 :         ereport(ERROR,
     810              :                 (errcode(ERRCODE_INDEX_CORRUPTED),
     811              :                  errmsg("index \"%s\" contains unexpected zero page at block %u",
     812              :                         RelationGetRelationName(rel),
     813              :                         BufferGetBlockNumber(buf)),
     814              :                  errhint("Please REINDEX it.")));
     815              : 
     816              :     /*
     817              :      * Additionally check that the special area looks sane.
     818              :      */
     819     23657212 :     if (PageGetSpecialSize(page) != MAXALIGN(sizeof(BTPageOpaqueData)))
     820            0 :         ereport(ERROR,
     821              :                 (errcode(ERRCODE_INDEX_CORRUPTED),
     822              :                  errmsg("index \"%s\" contains corrupted page at block %u",
     823              :                         RelationGetRelationName(rel),
     824              :                         BufferGetBlockNumber(buf)),
     825              :                  errhint("Please REINDEX it.")));
     826     23657212 : }
     827              : 
     828              : /*
     829              :  *  _bt_getbuf() -- Get an existing block in a buffer, for read or write.
     830              :  *
     831              :  *      The general rule in nbtree is that it's never okay to access a
     832              :  *      page without holding both a buffer pin and a buffer lock on
     833              :  *      the page's buffer.
     834              :  *
     835              :  *      When this routine returns, the appropriate lock is set on the
     836              :  *      requested buffer and its reference count has been incremented
     837              :  *      (ie, the buffer is "locked and pinned").  Also, we apply
     838              :  *      _bt_checkpage to sanity-check the page, and perform Valgrind
     839              :  *      client requests that help Valgrind detect unsafe page accesses.
     840              :  *
     841              :  *      Note: raw LockBuffer() calls are disallowed in nbtree; all
     842              :  *      buffer lock requests need to go through wrapper functions such
     843              :  *      as _bt_lockbuf().
     844              :  */
     845              : Buffer
     846     13534125 : _bt_getbuf(Relation rel, BlockNumber blkno, int access)
     847              : {
     848              :     Buffer      buf;
     849              : 
     850              :     Assert(BlockNumberIsValid(blkno));
     851              : 
     852              :     /* Read an existing block of the relation */
     853     13534125 :     buf = ReadBuffer(rel, blkno);
     854     13534125 :     _bt_lockbuf(rel, buf, access);
     855     13534125 :     _bt_checkpage(rel, buf);
     856              : 
     857     13534125 :     return buf;
     858              : }
     859              : 
     860              : /*
     861              :  *  _bt_allocbuf() -- Allocate a new block/page.
     862              :  *
     863              :  * Returns a write-locked buffer containing an unallocated nbtree page.
     864              :  *
     865              :  * Callers are required to pass a valid heaprel.  We need heaprel so that we
     866              :  * can handle generating a snapshotConflictHorizon that makes reusing a page
     867              :  * from the FSM safe for queries that may be running on standbys.
     868              :  */
     869              : Buffer
     870        18769 : _bt_allocbuf(Relation rel, Relation heaprel)
     871              : {
     872              :     Buffer      buf;
     873              :     BlockNumber blkno;
     874              :     Page        page;
     875              : 
     876              :     Assert(heaprel != NULL);
     877              : 
     878              :     /*
     879              :      * First see if the FSM knows of any free pages.
     880              :      *
     881              :      * We can't trust the FSM's report unreservedly; we have to check that the
     882              :      * page is still free.  (For example, an already-free page could have been
     883              :      * re-used between the time the last VACUUM scanned it and the time the
     884              :      * VACUUM made its FSM updates.)
     885              :      *
     886              :      * In fact, it's worse than that: we can't even assume that it's safe to
     887              :      * take a lock on the reported page.  If somebody else has a lock on it,
     888              :      * or even worse our own caller does, we could deadlock.  (The own-caller
     889              :      * scenario is actually not improbable. Consider an index on a serial or
     890              :      * timestamp column.  Nearly all splits will be at the rightmost page, so
     891              :      * it's entirely likely that _bt_split will call us while holding a lock
     892              :      * on the page most recently acquired from FSM. A VACUUM running
     893              :      * concurrently with the previous split could well have placed that page
     894              :      * back in FSM.)
     895              :      *
     896              :      * To get around that, we ask for only a conditional lock on the reported
     897              :      * page.  If we fail, then someone else is using the page, and we may
     898              :      * reasonably assume it's not free.  (If we happen to be wrong, the worst
     899              :      * consequence is the page will be lost to use till the next VACUUM, which
     900              :      * is no big problem.)
     901              :      */
     902              :     for (;;)
     903              :     {
     904        18769 :         blkno = GetFreeIndexPage(rel);
     905        18769 :         if (blkno == InvalidBlockNumber)
     906        18652 :             break;
     907          117 :         buf = ReadBuffer(rel, blkno);
     908          117 :         if (_bt_conditionallockbuf(rel, buf))
     909              :         {
     910          117 :             page = BufferGetPage(buf);
     911              : 
     912              :             /*
     913              :              * It's possible to find an all-zeroes page in an index.  For
     914              :              * example, a backend might successfully extend the relation one
     915              :              * page and then crash before it is able to make a WAL entry for
     916              :              * adding the page.  If we find a zeroed page then reclaim it
     917              :              * immediately.
     918              :              */
     919          117 :             if (PageIsNew(page))
     920              :             {
     921              :                 /* Okay to use page.  Initialize and return it. */
     922            0 :                 _bt_pageinit(page, BufferGetPageSize(buf));
     923            0 :                 return buf;
     924              :             }
     925              : 
     926          117 :             if (BTPageIsRecyclable(page, heaprel))
     927              :             {
     928              :                 /*
     929              :                  * If we are generating WAL for Hot Standby then create a WAL
     930              :                  * record that will allow us to conflict with queries running
     931              :                  * on standby, in case they have snapshots older than safexid
     932              :                  * value
     933              :                  */
     934          117 :                 if (RelationNeedsWAL(rel) && XLogStandbyInfoActive())
     935              :                 {
     936              :                     xl_btree_reuse_page xlrec_reuse;
     937              : 
     938              :                     /*
     939              :                      * Note that we don't register the buffer with the record,
     940              :                      * because this operation doesn't modify the page (that
     941              :                      * already happened, back when VACUUM deleted the page).
     942              :                      * This record only exists to provide a conflict point for
     943              :                      * Hot Standby.  See record REDO routine comments.
     944              :                      */
     945          117 :                     xlrec_reuse.locator = rel->rd_locator;
     946          117 :                     xlrec_reuse.block = blkno;
     947          117 :                     xlrec_reuse.snapshotConflictHorizon = BTPageGetDeleteXid(page);
     948          117 :                     xlrec_reuse.isCatalogRel =
     949          117 :                         RelationIsAccessibleInLogicalDecoding(heaprel);
     950              : 
     951          117 :                     XLogBeginInsert();
     952          117 :                     XLogRegisterData(&xlrec_reuse, SizeOfBtreeReusePage);
     953              : 
     954          117 :                     XLogInsert(RM_BTREE_ID, XLOG_BTREE_REUSE_PAGE);
     955              :                 }
     956              : 
     957              :                 /* Okay to use page.  Re-initialize and return it. */
     958          117 :                 _bt_pageinit(page, BufferGetPageSize(buf));
     959          117 :                 return buf;
     960              :             }
     961            0 :             elog(DEBUG2, "FSM returned nonrecyclable page");
     962            0 :             _bt_relbuf(rel, buf);
     963              :         }
     964              :         else
     965              :         {
     966            0 :             elog(DEBUG2, "FSM returned nonlockable page");
     967              :             /* couldn't get lock, so just drop pin */
     968            0 :             ReleaseBuffer(buf);
     969              :         }
     970              :     }
     971              : 
     972              :     /*
     973              :      * Extend the relation by one page. Need to use RBM_ZERO_AND_LOCK or we
     974              :      * risk a race condition against btvacuumscan --- see comments therein.
     975              :      * This forces us to repeat the valgrind request that _bt_lockbuf()
     976              :      * otherwise would make, as we can't use _bt_lockbuf() without introducing
     977              :      * a race.
     978              :      */
     979        18652 :     buf = ExtendBufferedRel(BMR_REL(rel), MAIN_FORKNUM, NULL, EB_LOCK_FIRST);
     980        18652 :     if (!RelationUsesLocalBuffers(rel))
     981              :         VALGRIND_MAKE_MEM_DEFINED(BufferGetPage(buf), BLCKSZ);
     982              : 
     983              :     /* Initialize the new page before returning it */
     984        18652 :     page = BufferGetPage(buf);
     985              :     Assert(PageIsNew(page));
     986        18652 :     _bt_pageinit(page, BufferGetPageSize(buf));
     987              : 
     988        18652 :     return buf;
     989              : }
     990              : 
     991              : /*
     992              :  *  _bt_relandgetbuf() -- release a locked buffer and get another one.
     993              :  *
     994              :  * This is equivalent to _bt_relbuf followed by _bt_getbuf.  Also, if obuf is
     995              :  * InvalidBuffer then it reduces to just _bt_getbuf; allowing this case
     996              :  * simplifies some callers.
     997              :  *
     998              :  * The original motivation for using this was to avoid two entries to the
     999              :  * bufmgr when one would do.  However, now it's mainly just a notational
    1000              :  * convenience.  The only case where it saves work over _bt_relbuf/_bt_getbuf
    1001              :  * is when the target page is the same one already in the buffer.
    1002              :  */
    1003              : Buffer
    1004     10058265 : _bt_relandgetbuf(Relation rel, Buffer obuf, BlockNumber blkno, int access)
    1005              : {
    1006              :     Buffer      buf;
    1007              : 
    1008              :     Assert(BlockNumberIsValid(blkno));
    1009     10058265 :     if (BufferIsValid(obuf))
    1010     10050363 :         _bt_unlockbuf(rel, obuf);
    1011     10058265 :     buf = ReleaseAndReadBuffer(obuf, rel, blkno);
    1012     10058265 :     _bt_lockbuf(rel, buf, access);
    1013              : 
    1014     10058265 :     _bt_checkpage(rel, buf);
    1015     10058265 :     return buf;
    1016              : }
    1017              : 
    1018              : /*
    1019              :  *  _bt_relbuf() -- release a locked buffer.
    1020              :  *
    1021              :  * Lock and pin (refcount) are both dropped.
    1022              :  */
    1023              : void
    1024     11249159 : _bt_relbuf(Relation rel, Buffer buf)
    1025              : {
    1026     11249159 :     _bt_unlockbuf(rel, buf);
    1027     11249159 :     ReleaseBuffer(buf);
    1028     11249159 : }
    1029              : 
    1030              : /*
    1031              :  *  _bt_lockbuf() -- lock a pinned buffer.
    1032              :  *
    1033              :  * Lock is acquired without acquiring another pin.  This is like a raw
    1034              :  * LockBuffer() call, but performs extra steps needed by Valgrind.
    1035              :  *
    1036              :  * Note: Caller may need to call _bt_checkpage() with buf when pin on buf
    1037              :  * wasn't originally acquired in _bt_getbuf() or _bt_relandgetbuf().
    1038              :  */
    1039              : void
    1040     24101400 : _bt_lockbuf(Relation rel, Buffer buf, int access)
    1041              : {
    1042              :     /* LockBuffer() asserts that pin is held by this backend */
    1043     24101400 :     LockBuffer(buf, access);
    1044              : 
    1045              :     /*
    1046              :      * It doesn't matter that _bt_unlockbuf() won't get called in the event of
    1047              :      * an nbtree error (e.g. a unique violation error).  That won't cause
    1048              :      * Valgrind false positives.
    1049              :      *
    1050              :      * The nbtree client requests are superimposed on top of the bufmgr.c
    1051              :      * buffer pin client requests.  In the event of an nbtree error the buffer
    1052              :      * will certainly get marked as defined when the backend once again
    1053              :      * acquires its first pin on the buffer. (Of course, if the backend never
    1054              :      * touches the buffer again then it doesn't matter that it remains
    1055              :      * non-accessible to Valgrind.)
    1056              :      *
    1057              :      * Note: When an IndexTuple C pointer gets computed using an ItemId read
    1058              :      * from a page while a lock was held, the C pointer becomes unsafe to
    1059              :      * dereference forever as soon as the lock is released.  Valgrind can only
    1060              :      * detect cases where the pointer gets dereferenced with no _current_
    1061              :      * lock/pin held, though.
    1062              :      */
    1063     24101400 :     if (!RelationUsesLocalBuffers(rel))
    1064              :         VALGRIND_MAKE_MEM_DEFINED(BufferGetPage(buf), BLCKSZ);
    1065     24101400 : }
    1066              : 
    1067              : /*
    1068              :  *  _bt_unlockbuf() -- unlock a pinned buffer.
    1069              :  */
    1070              : void
    1071     24148838 : _bt_unlockbuf(Relation rel, Buffer buf)
    1072              : {
    1073              :     /*
    1074              :      * Buffer is pinned and locked, which means that it is expected to be
    1075              :      * defined and addressable.  Check that proactively.
    1076              :      */
    1077              :     VALGRIND_CHECK_MEM_IS_DEFINED(BufferGetPage(buf), BLCKSZ);
    1078              : 
    1079              :     /* LockBuffer() asserts that pin is held by this backend */
    1080     24148838 :     LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    1081              : 
    1082     24148838 :     if (!RelationUsesLocalBuffers(rel))
    1083              :         VALGRIND_MAKE_MEM_NOACCESS(BufferGetPage(buf), BLCKSZ);
    1084     24148838 : }
    1085              : 
    1086              : /*
    1087              :  *  _bt_conditionallockbuf() -- conditionally BT_WRITE lock pinned
    1088              :  *  buffer.
    1089              :  *
    1090              :  * Note: Caller may need to call _bt_checkpage() with buf when pin on buf
    1091              :  * wasn't originally acquired in _bt_getbuf() or _bt_relandgetbuf().
    1092              :  */
    1093              : bool
    1094        29094 : _bt_conditionallockbuf(Relation rel, Buffer buf)
    1095              : {
    1096              :     /* ConditionalLockBuffer() asserts that pin is held by this backend */
    1097        29094 :     if (!ConditionalLockBuffer(buf))
    1098          301 :         return false;
    1099              : 
    1100        28793 :     if (!RelationUsesLocalBuffers(rel))
    1101              :         VALGRIND_MAKE_MEM_DEFINED(BufferGetPage(buf), BLCKSZ);
    1102              : 
    1103        28793 :     return true;
    1104              : }
    1105              : 
    1106              : /*
    1107              :  *  _bt_upgradelockbufcleanup() -- upgrade lock to a full cleanup lock.
    1108              :  */
    1109              : void
    1110        12982 : _bt_upgradelockbufcleanup(Relation rel, Buffer buf)
    1111              : {
    1112              :     /*
    1113              :      * Buffer is pinned and locked, which means that it is expected to be
    1114              :      * defined and addressable.  Check that proactively.
    1115              :      */
    1116              :     VALGRIND_CHECK_MEM_IS_DEFINED(BufferGetPage(buf), BLCKSZ);
    1117              : 
    1118              :     /* LockBuffer() asserts that pin is held by this backend */
    1119        12982 :     LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    1120        12982 :     LockBufferForCleanup(buf);
    1121        12982 : }
    1122              : 
    1123              : /*
    1124              :  *  _bt_pageinit() -- Initialize a new page.
    1125              :  *
    1126              :  * On return, the page header is initialized; data space is empty;
    1127              :  * special space is zeroed out.
    1128              :  */
    1129              : void
    1130        87939 : _bt_pageinit(Page page, Size size)
    1131              : {
    1132        87939 :     PageInit(page, size, sizeof(BTPageOpaqueData));
    1133        87939 : }
    1134              : 
    1135              : /*
    1136              :  * Delete item(s) from a btree leaf page during VACUUM.
    1137              :  *
    1138              :  * This routine assumes that the caller already has a full cleanup lock on
    1139              :  * the buffer.  Also, the given deletable and updatable arrays *must* be
    1140              :  * sorted in ascending order.
    1141              :  *
    1142              :  * Routine deals with deleting TIDs when some (but not all) of the heap TIDs
    1143              :  * in an existing posting list item are to be removed.  This works by
    1144              :  * updating/overwriting an existing item with caller's new version of the item
    1145              :  * (a version that lacks the TIDs that are to be deleted).
    1146              :  *
    1147              :  * We record VACUUMs and b-tree deletes differently in WAL.  Deletes must
    1148              :  * generate their own snapshotConflictHorizon directly from the tableam,
    1149              :  * whereas VACUUMs rely on the initial VACUUM table scan performing
    1150              :  * WAL-logging that takes care of the issue for the table's indexes
    1151              :  * indirectly.  Also, we remove the VACUUM cycle ID from pages, which b-tree
    1152              :  * deletes don't do.
    1153              :  */
    1154              : void
    1155         7984 : _bt_delitems_vacuum(Relation rel, Buffer buf,
    1156              :                     OffsetNumber *deletable, int ndeletable,
    1157              :                     BTVacuumPosting *updatable, int nupdatable)
    1158              : {
    1159         7984 :     Page        page = BufferGetPage(buf);
    1160              :     BTPageOpaque opaque;
    1161         7984 :     bool        needswal = RelationNeedsWAL(rel);
    1162         7984 :     char       *updatedbuf = NULL;
    1163         7984 :     Size        updatedbuflen = 0;
    1164              :     OffsetNumber updatedoffsets[MaxIndexTuplesPerPage];
    1165              : 
    1166              :     /* Shouldn't be called unless there's something to do */
    1167              :     Assert(ndeletable > 0 || nupdatable > 0);
    1168              : 
    1169              :     /* Generate new version of posting lists without deleted TIDs */
    1170         7984 :     if (nupdatable > 0)
    1171          895 :         updatedbuf = _bt_delitems_update(updatable, nupdatable,
    1172              :                                          updatedoffsets, &updatedbuflen,
    1173              :                                          needswal);
    1174              : 
    1175              :     /* No ereport(ERROR) until changes are logged */
    1176         7984 :     START_CRIT_SECTION();
    1177              : 
    1178              :     /*
    1179              :      * Handle posting tuple updates.
    1180              :      *
    1181              :      * Deliberately do this before handling simple deletes.  If we did it the
    1182              :      * other way around (i.e. WAL record order -- simple deletes before
    1183              :      * updates) then we'd have to make compensating changes to the 'updatable'
    1184              :      * array of offset numbers.
    1185              :      *
    1186              :      * PageIndexTupleOverwrite() won't unset each item's LP_DEAD bit when it
    1187              :      * happens to already be set.  It's important that we not interfere with
    1188              :      * any future simple index tuple deletion operations.
    1189              :      */
    1190        26995 :     for (int i = 0; i < nupdatable; i++)
    1191              :     {
    1192        19011 :         OffsetNumber updatedoffset = updatedoffsets[i];
    1193              :         IndexTuple  itup;
    1194              :         Size        itemsz;
    1195              : 
    1196        19011 :         itup = updatable[i]->itup;
    1197        19011 :         itemsz = MAXALIGN(IndexTupleSize(itup));
    1198        19011 :         if (!PageIndexTupleOverwrite(page, updatedoffset, itup, itemsz))
    1199            0 :             elog(PANIC, "failed to update partially dead item in block %u of index \"%s\"",
    1200              :                  BufferGetBlockNumber(buf), RelationGetRelationName(rel));
    1201              :     }
    1202              : 
    1203              :     /* Now handle simple deletes of entire tuples */
    1204         7984 :     if (ndeletable > 0)
    1205         7703 :         PageIndexMultiDelete(page, deletable, ndeletable);
    1206              : 
    1207              :     /*
    1208              :      * We can clear the vacuum cycle ID since this page has certainly been
    1209              :      * processed by the current vacuum scan.
    1210              :      */
    1211         7984 :     opaque = BTPageGetOpaque(page);
    1212         7984 :     opaque->btpo_cycleid = 0;
    1213              : 
    1214              :     /*
    1215              :      * Clear the BTP_HAS_GARBAGE page flag.
    1216              :      *
    1217              :      * This flag indicates the presence of LP_DEAD items on the page (though
    1218              :      * not reliably).  Note that we only rely on it with pg_upgrade'd
    1219              :      * !heapkeyspace indexes.  That's why clearing it here won't usually
    1220              :      * interfere with simple index tuple deletion.
    1221              :      */
    1222         7984 :     opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
    1223              : 
    1224         7984 :     MarkBufferDirty(buf);
    1225              : 
    1226              :     /* XLOG stuff */
    1227         7984 :     if (needswal)
    1228              :     {
    1229              :         XLogRecPtr  recptr;
    1230              :         xl_btree_vacuum xlrec_vacuum;
    1231              : 
    1232         7983 :         xlrec_vacuum.ndeleted = ndeletable;
    1233         7983 :         xlrec_vacuum.nupdated = nupdatable;
    1234              : 
    1235         7983 :         XLogBeginInsert();
    1236         7983 :         XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
    1237         7983 :         XLogRegisterData(&xlrec_vacuum, SizeOfBtreeVacuum);
    1238              : 
    1239         7983 :         if (ndeletable > 0)
    1240         7702 :             XLogRegisterBufData(0, deletable,
    1241              :                                 ndeletable * sizeof(OffsetNumber));
    1242              : 
    1243         7983 :         if (nupdatable > 0)
    1244              :         {
    1245          895 :             XLogRegisterBufData(0, updatedoffsets,
    1246              :                                 nupdatable * sizeof(OffsetNumber));
    1247          895 :             XLogRegisterBufData(0, updatedbuf, updatedbuflen);
    1248              :         }
    1249              : 
    1250         7983 :         recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_VACUUM);
    1251              : 
    1252         7983 :         PageSetLSN(page, recptr);
    1253              :     }
    1254              : 
    1255         7984 :     END_CRIT_SECTION();
    1256              : 
    1257              :     /* can't leak memory here */
    1258         7984 :     if (updatedbuf != NULL)
    1259          895 :         pfree(updatedbuf);
    1260              :     /* free tuples allocated within _bt_delitems_update() */
    1261        26995 :     for (int i = 0; i < nupdatable; i++)
    1262        19011 :         pfree(updatable[i]->itup);
    1263         7984 : }
    1264              : 
    1265              : /*
    1266              :  * Delete item(s) from a btree leaf page during single-page cleanup.
    1267              :  *
    1268              :  * This routine assumes that the caller has pinned and write locked the
    1269              :  * buffer.  Also, the given deletable and updatable arrays *must* be sorted in
    1270              :  * ascending order.
    1271              :  *
    1272              :  * Routine deals with deleting TIDs when some (but not all) of the heap TIDs
    1273              :  * in an existing posting list item are to be removed.  This works by
    1274              :  * updating/overwriting an existing item with caller's new version of the item
    1275              :  * (a version that lacks the TIDs that are to be deleted).
    1276              :  *
    1277              :  * This is nearly the same as _bt_delitems_vacuum as far as what it does to
    1278              :  * the page, but it needs its own snapshotConflictHorizon and isCatalogRel
    1279              :  * (from the tableam).  This is used by the REDO routine to generate recovery
    1280              :  * conflicts.  The other difference is that only _bt_delitems_vacuum will
    1281              :  * clear page's VACUUM cycle ID.
    1282              :  */
    1283              : static void
    1284         4334 : _bt_delitems_delete(Relation rel, Buffer buf,
    1285              :                     TransactionId snapshotConflictHorizon, bool isCatalogRel,
    1286              :                     OffsetNumber *deletable, int ndeletable,
    1287              :                     BTVacuumPosting *updatable, int nupdatable)
    1288              : {
    1289         4334 :     Page        page = BufferGetPage(buf);
    1290              :     BTPageOpaque opaque;
    1291         4334 :     bool        needswal = RelationNeedsWAL(rel);
    1292         4334 :     char       *updatedbuf = NULL;
    1293         4334 :     Size        updatedbuflen = 0;
    1294              :     OffsetNumber updatedoffsets[MaxIndexTuplesPerPage];
    1295              : 
    1296              :     /* Shouldn't be called unless there's something to do */
    1297              :     Assert(ndeletable > 0 || nupdatable > 0);
    1298              : 
    1299              :     /* Generate new versions of posting lists without deleted TIDs */
    1300         4334 :     if (nupdatable > 0)
    1301          453 :         updatedbuf = _bt_delitems_update(updatable, nupdatable,
    1302              :                                          updatedoffsets, &updatedbuflen,
    1303              :                                          needswal);
    1304              : 
    1305              :     /* No ereport(ERROR) until changes are logged */
    1306         4334 :     START_CRIT_SECTION();
    1307              : 
    1308              :     /* Handle updates and deletes just like _bt_delitems_vacuum */
    1309        11625 :     for (int i = 0; i < nupdatable; i++)
    1310              :     {
    1311         7291 :         OffsetNumber updatedoffset = updatedoffsets[i];
    1312              :         IndexTuple  itup;
    1313              :         Size        itemsz;
    1314              : 
    1315         7291 :         itup = updatable[i]->itup;
    1316         7291 :         itemsz = MAXALIGN(IndexTupleSize(itup));
    1317         7291 :         if (!PageIndexTupleOverwrite(page, updatedoffset, itup, itemsz))
    1318            0 :             elog(PANIC, "failed to update partially dead item in block %u of index \"%s\"",
    1319              :                  BufferGetBlockNumber(buf), RelationGetRelationName(rel));
    1320              :     }
    1321              : 
    1322         4334 :     if (ndeletable > 0)
    1323         4243 :         PageIndexMultiDelete(page, deletable, ndeletable);
    1324              : 
    1325              :     /*
    1326              :      * Unlike _bt_delitems_vacuum, we *must not* clear the vacuum cycle ID at
    1327              :      * this point.  The VACUUM command alone controls vacuum cycle IDs.
    1328              :      */
    1329         4334 :     opaque = BTPageGetOpaque(page);
    1330              : 
    1331              :     /*
    1332              :      * Clear the BTP_HAS_GARBAGE page flag.
    1333              :      *
    1334              :      * This flag indicates the presence of LP_DEAD items on the page (though
    1335              :      * not reliably).  Note that we only rely on it with pg_upgrade'd
    1336              :      * !heapkeyspace indexes.
    1337              :      */
    1338         4334 :     opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
    1339              : 
    1340         4334 :     MarkBufferDirty(buf);
    1341              : 
    1342              :     /* XLOG stuff */
    1343         4334 :     if (needswal)
    1344              :     {
    1345              :         XLogRecPtr  recptr;
    1346              :         xl_btree_delete xlrec_delete;
    1347              : 
    1348         4334 :         xlrec_delete.snapshotConflictHorizon = snapshotConflictHorizon;
    1349         4334 :         xlrec_delete.ndeleted = ndeletable;
    1350         4334 :         xlrec_delete.nupdated = nupdatable;
    1351         4334 :         xlrec_delete.isCatalogRel = isCatalogRel;
    1352              : 
    1353         4334 :         XLogBeginInsert();
    1354         4334 :         XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
    1355         4334 :         XLogRegisterData(&xlrec_delete, SizeOfBtreeDelete);
    1356              : 
    1357         4334 :         if (ndeletable > 0)
    1358         4243 :             XLogRegisterBufData(0, deletable,
    1359              :                                 ndeletable * sizeof(OffsetNumber));
    1360              : 
    1361         4334 :         if (nupdatable > 0)
    1362              :         {
    1363          453 :             XLogRegisterBufData(0, updatedoffsets,
    1364              :                                 nupdatable * sizeof(OffsetNumber));
    1365          453 :             XLogRegisterBufData(0, updatedbuf, updatedbuflen);
    1366              :         }
    1367              : 
    1368         4334 :         recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_DELETE);
    1369              : 
    1370         4334 :         PageSetLSN(page, recptr);
    1371              :     }
    1372              : 
    1373         4334 :     END_CRIT_SECTION();
    1374              : 
    1375              :     /* can't leak memory here */
    1376         4334 :     if (updatedbuf != NULL)
    1377          453 :         pfree(updatedbuf);
    1378              :     /* free tuples allocated within _bt_delitems_update() */
    1379        11625 :     for (int i = 0; i < nupdatable; i++)
    1380         7291 :         pfree(updatable[i]->itup);
    1381         4334 : }
    1382              : 
    1383              : /*
    1384              :  * Set up state needed to delete TIDs from posting list tuples via "updating"
    1385              :  * the tuple.  Performs steps common to both _bt_delitems_vacuum and
    1386              :  * _bt_delitems_delete.  These steps must take place before each function's
    1387              :  * critical section begins.
    1388              :  *
    1389              :  * updatable and nupdatable are inputs, though note that we will use
    1390              :  * _bt_update_posting() to replace the original itup with a pointer to a final
    1391              :  * version in palloc()'d memory.  Caller should free the tuples when its done.
    1392              :  *
    1393              :  * The first nupdatable entries from updatedoffsets are set to the page offset
    1394              :  * number for posting list tuples that caller updates.  This is mostly useful
    1395              :  * because caller may need to WAL-log the page offsets (though we always do
    1396              :  * this for caller out of convenience).
    1397              :  *
    1398              :  * Returns buffer consisting of an array of xl_btree_update structs that
    1399              :  * describe the steps we perform here for caller (though only when needswal is
    1400              :  * true).  Also sets *updatedbuflen to the final size of the buffer.  This
    1401              :  * buffer is used by caller when WAL logging is required.
    1402              :  */
    1403              : static char *
    1404         1348 : _bt_delitems_update(BTVacuumPosting *updatable, int nupdatable,
    1405              :                     OffsetNumber *updatedoffsets, Size *updatedbuflen,
    1406              :                     bool needswal)
    1407              : {
    1408         1348 :     char       *updatedbuf = NULL;
    1409         1348 :     Size        buflen = 0;
    1410              : 
    1411              :     /* Shouldn't be called unless there's something to do */
    1412              :     Assert(nupdatable > 0);
    1413              : 
    1414        27650 :     for (int i = 0; i < nupdatable; i++)
    1415              :     {
    1416        26302 :         BTVacuumPosting vacposting = updatable[i];
    1417              :         Size        itemsz;
    1418              : 
    1419              :         /* Replace work area IndexTuple with updated version */
    1420        26302 :         _bt_update_posting(vacposting);
    1421              : 
    1422              :         /* Keep track of size of xl_btree_update for updatedbuf in passing */
    1423        26302 :         itemsz = SizeOfBtreeUpdate + vacposting->ndeletedtids * sizeof(uint16);
    1424        26302 :         buflen += itemsz;
    1425              : 
    1426              :         /* Build updatedoffsets buffer in passing */
    1427        26302 :         updatedoffsets[i] = vacposting->updatedoffset;
    1428              :     }
    1429              : 
    1430              :     /* XLOG stuff */
    1431         1348 :     if (needswal)
    1432              :     {
    1433         1348 :         Size        offset = 0;
    1434              : 
    1435              :         /* Allocate, set final size for caller */
    1436         1348 :         updatedbuf = palloc(buflen);
    1437         1348 :         *updatedbuflen = buflen;
    1438        27650 :         for (int i = 0; i < nupdatable; i++)
    1439              :         {
    1440        26302 :             BTVacuumPosting vacposting = updatable[i];
    1441              :             Size        itemsz;
    1442              :             xl_btree_update update;
    1443              : 
    1444        26302 :             update.ndeletedtids = vacposting->ndeletedtids;
    1445        26302 :             memcpy(updatedbuf + offset, &update.ndeletedtids,
    1446              :                    SizeOfBtreeUpdate);
    1447        26302 :             offset += SizeOfBtreeUpdate;
    1448              : 
    1449        26302 :             itemsz = update.ndeletedtids * sizeof(uint16);
    1450        26302 :             memcpy(updatedbuf + offset, vacposting->deletetids, itemsz);
    1451        26302 :             offset += itemsz;
    1452              :         }
    1453              :     }
    1454              : 
    1455         1348 :     return updatedbuf;
    1456              : }
    1457              : 
    1458              : /*
    1459              :  * Comparator used by _bt_delitems_delete_check() to restore deltids array
    1460              :  * back to its original leaf-page-wise sort order
    1461              :  */
    1462              : static int
    1463      2614205 : _bt_delitems_cmp(const void *a, const void *b)
    1464              : {
    1465      2614205 :     const TM_IndexDelete *indexdelete1 = a;
    1466      2614205 :     const TM_IndexDelete *indexdelete2 = b;
    1467              : 
    1468              :     Assert(indexdelete1->id != indexdelete2->id);
    1469              : 
    1470      2614205 :     return pg_cmp_s16(indexdelete1->id, indexdelete2->id);
    1471              : }
    1472              : 
    1473              : /*
    1474              :  * Try to delete item(s) from a btree leaf page during single-page cleanup.
    1475              :  *
    1476              :  * nbtree interface to table_index_delete_tuples().  Deletes a subset of index
    1477              :  * tuples from caller's deltids array: those whose TIDs are found safe to
    1478              :  * delete by the tableam (or already marked LP_DEAD in index, and so already
    1479              :  * known to be deletable by our simple index deletion caller).  We physically
    1480              :  * delete index tuples from buf leaf page last of all (for index tuples where
    1481              :  * that is known to be safe following our table_index_delete_tuples() call).
    1482              :  *
    1483              :  * Simple index deletion caller only includes TIDs from index tuples marked
    1484              :  * LP_DEAD, as well as extra TIDs it found on the same leaf page that can be
    1485              :  * included without increasing the total number of distinct table blocks for
    1486              :  * the deletion operation as a whole.  This approach often allows us to delete
    1487              :  * some extra index tuples that were practically free for tableam to check in
    1488              :  * passing (when they actually turn out to be safe to delete).  It probably
    1489              :  * only makes sense for the tableam to go ahead with these extra checks when
    1490              :  * it is block-oriented (otherwise the checks probably won't be practically
    1491              :  * free, which we rely on).  The tableam interface requires the tableam side
    1492              :  * to handle the problem, though, so this is okay (we as an index AM are free
    1493              :  * to make the simplifying assumption that all tableams must be block-based).
    1494              :  *
    1495              :  * Bottom-up index deletion caller provides all the TIDs from the leaf page,
    1496              :  * without expecting that tableam will check most of them.  The tableam has
    1497              :  * considerable discretion around which entries/blocks it checks.  Our role in
    1498              :  * costing the bottom-up deletion operation is strictly advisory.
    1499              :  *
    1500              :  * Note: Caller must have added deltids entries (i.e. entries that go in
    1501              :  * delstate's main array) in leaf-page-wise order: page offset number order,
    1502              :  * TID order among entries taken from the same posting list tuple (tiebreak on
    1503              :  * TID).  This order is convenient to work with here.
    1504              :  *
    1505              :  * Note: We also rely on the id field of each deltids element "capturing" this
    1506              :  * original leaf-page-wise order.  That is, we expect to be able to get back
    1507              :  * to the original leaf-page-wise order just by sorting deltids on the id
    1508              :  * field (tableam will sort deltids for its own reasons, so we'll need to put
    1509              :  * it back in leaf-page-wise order afterwards).
    1510              :  */
    1511              : void
    1512         5928 : _bt_delitems_delete_check(Relation rel, Buffer buf, Relation heapRel,
    1513              :                           TM_IndexDeleteOp *delstate)
    1514              : {
    1515         5928 :     Page        page = BufferGetPage(buf);
    1516              :     TransactionId snapshotConflictHorizon;
    1517              :     bool        isCatalogRel;
    1518         5928 :     OffsetNumber postingidxoffnum = InvalidOffsetNumber;
    1519         5928 :     int         ndeletable = 0,
    1520         5928 :                 nupdatable = 0;
    1521              :     OffsetNumber deletable[MaxIndexTuplesPerPage];
    1522              :     BTVacuumPosting updatable[MaxIndexTuplesPerPage];
    1523              : 
    1524              :     /* Use tableam interface to determine which tuples to delete first */
    1525         5928 :     snapshotConflictHorizon = table_index_delete_tuples(heapRel, delstate);
    1526         5928 :     isCatalogRel = RelationIsAccessibleInLogicalDecoding(heapRel);
    1527              : 
    1528              :     /* Should not WAL-log snapshotConflictHorizon unless it's required */
    1529         5928 :     if (!XLogStandbyInfoActive())
    1530          687 :         snapshotConflictHorizon = InvalidTransactionId;
    1531              : 
    1532              :     /*
    1533              :      * Construct a leaf-page-wise description of what _bt_delitems_delete()
    1534              :      * needs to do to physically delete index tuples from the page.
    1535              :      *
    1536              :      * Must sort deltids array to restore leaf-page-wise order (original order
    1537              :      * before call to tableam).  This is the order that the loop expects.
    1538              :      *
    1539              :      * Note that deltids array might be a lot smaller now.  It might even have
    1540              :      * no entries at all (with bottom-up deletion caller), in which case there
    1541              :      * is nothing left to do.
    1542              :      */
    1543         5928 :     qsort(delstate->deltids, delstate->ndeltids, sizeof(TM_IndexDelete),
    1544              :           _bt_delitems_cmp);
    1545         5928 :     if (delstate->ndeltids == 0)
    1546              :     {
    1547              :         Assert(delstate->bottomup);
    1548         1594 :         return;
    1549              :     }
    1550              : 
    1551              :     /* We definitely have to delete at least one index tuple (or one TID) */
    1552       385753 :     for (int i = 0; i < delstate->ndeltids; i++)
    1553              :     {
    1554       381419 :         TM_IndexStatus *dstatus = delstate->status + delstate->deltids[i].id;
    1555       381419 :         OffsetNumber idxoffnum = dstatus->idxoffnum;
    1556       381419 :         ItemId      itemid = PageGetItemId(page, idxoffnum);
    1557       381419 :         IndexTuple  itup = (IndexTuple) PageGetItem(page, itemid);
    1558              :         int         nestedi,
    1559              :                     nitem;
    1560              :         BTVacuumPosting vacposting;
    1561              : 
    1562              :         Assert(OffsetNumberIsValid(idxoffnum));
    1563              : 
    1564       381419 :         if (idxoffnum == postingidxoffnum)
    1565              :         {
    1566              :             /*
    1567              :              * This deltid entry is a TID from a posting list tuple that has
    1568              :              * already been completely processed
    1569              :              */
    1570              :             Assert(BTreeTupleIsPosting(itup));
    1571              :             Assert(ItemPointerCompare(BTreeTupleGetHeapTID(itup),
    1572              :                                       &delstate->deltids[i].tid) < 0);
    1573              :             Assert(ItemPointerCompare(BTreeTupleGetMaxHeapTID(itup),
    1574              :                                       &delstate->deltids[i].tid) >= 0);
    1575        14876 :             continue;
    1576              :         }
    1577              : 
    1578       366543 :         if (!BTreeTupleIsPosting(itup))
    1579              :         {
    1580              :             /* Plain non-pivot tuple */
    1581              :             Assert(ItemPointerEquals(&itup->t_tid, &delstate->deltids[i].tid));
    1582       351396 :             if (dstatus->knowndeletable)
    1583       281023 :                 deletable[ndeletable++] = idxoffnum;
    1584       351396 :             continue;
    1585              :         }
    1586              : 
    1587              :         /*
    1588              :          * itup is a posting list tuple whose lowest deltids entry (which may
    1589              :          * or may not be for the first TID from itup) is considered here now.
    1590              :          * We should process all of the deltids entries for the posting list
    1591              :          * together now, though (not just the lowest).  Remember to skip over
    1592              :          * later itup-related entries during later iterations of outermost
    1593              :          * loop.
    1594              :          */
    1595        15147 :         postingidxoffnum = idxoffnum;   /* Remember work in outermost loop */
    1596        15147 :         nestedi = i;            /* Initialize for first itup deltids entry */
    1597        15147 :         vacposting = NULL;      /* Describes final action for itup */
    1598        15147 :         nitem = BTreeTupleGetNPosting(itup);
    1599        72496 :         for (int p = 0; p < nitem; p++)
    1600              :         {
    1601        57349 :             ItemPointer ptid = BTreeTupleGetPostingN(itup, p);
    1602        57349 :             int         ptidcmp = -1;
    1603              : 
    1604              :             /*
    1605              :              * This nested loop reuses work across ptid TIDs taken from itup.
    1606              :              * We take advantage of the fact that both itup's TIDs and deltids
    1607              :              * entries (within a single itup/posting list grouping) must both
    1608              :              * be in ascending TID order.
    1609              :              */
    1610        79134 :             for (; nestedi < delstate->ndeltids; nestedi++)
    1611              :             {
    1612        76192 :                 TM_IndexDelete *tcdeltid = &delstate->deltids[nestedi];
    1613        76192 :                 TM_IndexStatus *tdstatus = (delstate->status + tcdeltid->id);
    1614              : 
    1615              :                 /* Stop once we get past all itup related deltids entries */
    1616              :                 Assert(tdstatus->idxoffnum >= idxoffnum);
    1617        76192 :                 if (tdstatus->idxoffnum != idxoffnum)
    1618        19166 :                     break;
    1619              : 
    1620              :                 /* Skip past non-deletable itup related entries up front */
    1621        57026 :                 if (!tdstatus->knowndeletable)
    1622         4371 :                     continue;
    1623              : 
    1624              :                 /* Entry is first partial ptid match (or an exact match)? */
    1625        52655 :                 ptidcmp = ItemPointerCompare(&tcdeltid->tid, ptid);
    1626        52655 :                 if (ptidcmp >= 0)
    1627              :                 {
    1628              :                     /* Greater than or equal (partial or exact) match... */
    1629        35241 :                     break;
    1630              :                 }
    1631              :             }
    1632              : 
    1633              :             /* ...exact ptid match to a deletable deltids entry? */
    1634        57349 :             if (ptidcmp != 0)
    1635        31697 :                 continue;
    1636              : 
    1637              :             /* Exact match for deletable deltids entry -- ptid gets deleted */
    1638        25652 :             if (vacposting == NULL)
    1639              :             {
    1640        13799 :                 vacposting = palloc(offsetof(BTVacuumPostingData, deletetids) +
    1641              :                                     nitem * sizeof(uint16));
    1642        13799 :                 vacposting->itup = itup;
    1643        13799 :                 vacposting->updatedoffset = idxoffnum;
    1644        13799 :                 vacposting->ndeletedtids = 0;
    1645              :             }
    1646        25652 :             vacposting->deletetids[vacposting->ndeletedtids++] = p;
    1647              :         }
    1648              : 
    1649              :         /* Final decision on itup, a posting list tuple */
    1650              : 
    1651        15147 :         if (vacposting == NULL)
    1652              :         {
    1653              :             /* No TIDs to delete from itup -- do nothing */
    1654              :         }
    1655        13799 :         else if (vacposting->ndeletedtids == nitem)
    1656              :         {
    1657              :             /* Straight delete of itup (to delete all TIDs) */
    1658         6508 :             deletable[ndeletable++] = idxoffnum;
    1659              :             /* Turns out we won't need granular information */
    1660         6508 :             pfree(vacposting);
    1661              :         }
    1662              :         else
    1663              :         {
    1664              :             /* Delete some (but not all) TIDs from itup */
    1665              :             Assert(vacposting->ndeletedtids > 0 &&
    1666              :                    vacposting->ndeletedtids < nitem);
    1667         7291 :             updatable[nupdatable++] = vacposting;
    1668              :         }
    1669              :     }
    1670              : 
    1671              :     /* Physically delete tuples (or TIDs) using deletable (or updatable) */
    1672         4334 :     _bt_delitems_delete(rel, buf, snapshotConflictHorizon, isCatalogRel,
    1673              :                         deletable, ndeletable, updatable, nupdatable);
    1674              : 
    1675              :     /* be tidy */
    1676        11625 :     for (int i = 0; i < nupdatable; i++)
    1677         7291 :         pfree(updatable[i]);
    1678              : }
    1679              : 
    1680              : /*
    1681              :  * Check that leftsib page (the btpo_prev of target page) is not marked with
    1682              :  * INCOMPLETE_SPLIT flag.  Used during page deletion.
    1683              :  *
    1684              :  * Returning true indicates that page flag is set in leftsib (which is
    1685              :  * definitely still the left sibling of target).  When that happens, the
    1686              :  * target doesn't have a downlink in parent, and the page deletion algorithm
    1687              :  * isn't prepared to handle that.  Deletion of the target page (or the whole
    1688              :  * subtree that contains the target page) cannot take place.
    1689              :  *
    1690              :  * Caller should not have a lock on the target page itself, since pages on the
    1691              :  * same level must always be locked left to right to avoid deadlocks.
    1692              :  */
    1693              : static bool
    1694         2959 : _bt_leftsib_splitflag(Relation rel, BlockNumber leftsib, BlockNumber target)
    1695              : {
    1696              :     Buffer      buf;
    1697              :     Page        page;
    1698              :     BTPageOpaque opaque;
    1699              :     bool        result;
    1700              : 
    1701              :     /* Easy case: No left sibling */
    1702         2959 :     if (leftsib == P_NONE)
    1703         2269 :         return false;
    1704              : 
    1705          690 :     buf = _bt_getbuf(rel, leftsib, BT_READ);
    1706          690 :     page = BufferGetPage(buf);
    1707          690 :     opaque = BTPageGetOpaque(page);
    1708              : 
    1709              :     /*
    1710              :      * If the left sibling was concurrently split, so that its next-pointer
    1711              :      * doesn't point to the current page anymore, the split that created
    1712              :      * target must be completed.  Caller can reasonably expect that there will
    1713              :      * be a downlink to the target page that it can relocate using its stack.
    1714              :      * (We don't allow splitting an incompletely split page again until the
    1715              :      * previous split has been completed.)
    1716              :      */
    1717          690 :     result = (opaque->btpo_next == target && P_INCOMPLETE_SPLIT(opaque));
    1718          690 :     _bt_relbuf(rel, buf);
    1719              : 
    1720          690 :     return result;
    1721              : }
    1722              : 
    1723              : /*
    1724              :  * Check that leafrightsib page (the btpo_next of target leaf page) is not
    1725              :  * marked with ISHALFDEAD flag.  Used during page deletion.
    1726              :  *
    1727              :  * Returning true indicates that page flag is set in leafrightsib, so page
    1728              :  * deletion cannot go ahead.  Our caller is not prepared to deal with the case
    1729              :  * where the parent page does not have a pivot tuples whose downlink points to
    1730              :  * leafrightsib (due to an earlier interrupted VACUUM operation).  It doesn't
    1731              :  * seem worth going to the trouble of teaching our caller to deal with it.
    1732              :  * The situation will be resolved after VACUUM finishes the deletion of the
    1733              :  * half-dead page (when a future VACUUM operation reaches the target page
    1734              :  * again).
    1735              :  *
    1736              :  * _bt_leftsib_splitflag() is called for both leaf pages and internal pages.
    1737              :  * _bt_rightsib_halfdeadflag() is only called for leaf pages, though.  This is
    1738              :  * okay because of the restriction on deleting pages that are the rightmost
    1739              :  * page of their parent (i.e. that such deletions can only take place when the
    1740              :  * entire subtree must be deleted).  The leaf level check made here will apply
    1741              :  * to a right "cousin" leaf page rather than a simple right sibling leaf page
    1742              :  * in cases where caller actually goes on to attempt deleting pages that are
    1743              :  * above the leaf page.  The right cousin leaf page is representative of the
    1744              :  * left edge of the subtree to the right of the to-be-deleted subtree as a
    1745              :  * whole, which is exactly the condition that our caller cares about.
    1746              :  * (Besides, internal pages are never marked half-dead, so it isn't even
    1747              :  * possible to _directly_ assess if an internal page is part of some other
    1748              :  * to-be-deleted subtree.)
    1749              :  */
    1750              : static bool
    1751         2872 : _bt_rightsib_halfdeadflag(Relation rel, BlockNumber leafrightsib)
    1752              : {
    1753              :     Buffer      buf;
    1754              :     Page        page;
    1755              :     BTPageOpaque opaque;
    1756              :     bool        result;
    1757              : 
    1758              :     Assert(leafrightsib != P_NONE);
    1759              : 
    1760         2872 :     buf = _bt_getbuf(rel, leafrightsib, BT_READ);
    1761         2872 :     page = BufferGetPage(buf);
    1762         2872 :     opaque = BTPageGetOpaque(page);
    1763              : 
    1764              :     Assert(P_ISLEAF(opaque) && !P_ISDELETED(opaque));
    1765         2872 :     result = P_ISHALFDEAD(opaque);
    1766         2872 :     _bt_relbuf(rel, buf);
    1767              : 
    1768         2872 :     return result;
    1769              : }
    1770              : 
    1771              : /*
    1772              :  * _bt_pagedel() -- Delete a leaf page from the b-tree, if legal to do so.
    1773              :  *
    1774              :  * This action unlinks the leaf page from the b-tree structure, removing all
    1775              :  * pointers leading to it --- but not touching its own left and right links.
    1776              :  * The page cannot be physically reclaimed right away, since other processes
    1777              :  * may currently be trying to follow links leading to the page; they have to
    1778              :  * be allowed to use its right-link to recover.  See nbtree/README.
    1779              :  *
    1780              :  * On entry, the target buffer must be pinned and locked (either read or write
    1781              :  * lock is OK).  The page must be an empty leaf page, which may be half-dead
    1782              :  * already (a half-dead page should only be passed to us when an earlier
    1783              :  * VACUUM operation was interrupted, though).  Note in particular that caller
    1784              :  * should never pass a buffer containing an existing deleted page here.  The
    1785              :  * lock and pin on caller's buffer will be dropped before we return.
    1786              :  *
    1787              :  * Maintains bulk delete stats for caller, which are taken from vstate.  We
    1788              :  * need to cooperate closely with caller here so that whole VACUUM operation
    1789              :  * reliably avoids any double counting of subsidiary-to-leafbuf pages that we
    1790              :  * delete in passing.  If such pages happen to be from a block number that is
    1791              :  * ahead of the current scanblkno position, then caller is expected to count
    1792              :  * them directly later on.  It's simpler for us to understand caller's
    1793              :  * requirements than it would be for caller to understand when or how a
    1794              :  * deleted page became deleted after the fact.
    1795              :  *
    1796              :  * NOTE: this leaks memory.  Rather than trying to clean up everything
    1797              :  * carefully, it's better to run it in a temp context that can be reset
    1798              :  * frequently.
    1799              :  */
    1800              : void
    1801         2970 : _bt_pagedel(Relation rel, Buffer leafbuf, BTVacState *vstate)
    1802              : {
    1803              :     BlockNumber rightsib;
    1804              :     bool        rightsib_empty;
    1805              :     Page        page;
    1806              :     BTPageOpaque opaque;
    1807              : 
    1808              :     /*
    1809              :      * Save original leafbuf block number from caller.  Only deleted blocks
    1810              :      * that are <= scanblkno are added to bulk delete stat's pages_deleted
    1811              :      * count.
    1812              :      */
    1813         2970 :     BlockNumber scanblkno = BufferGetBlockNumber(leafbuf);
    1814              : 
    1815              :     /*
    1816              :      * "stack" is a search stack leading (approximately) to the target page.
    1817              :      * It is initially NULL, but when iterating, we keep it to avoid
    1818              :      * duplicated search effort.
    1819              :      *
    1820              :      * Also, when "stack" is not NULL, we have already checked that the
    1821              :      * current page is not the right half of an incomplete split, i.e. the
    1822              :      * left sibling does not have its INCOMPLETE_SPLIT flag set, including
    1823              :      * when the current target page is to the right of caller's initial page
    1824              :      * (the scanblkno page).
    1825              :      */
    1826         2970 :     BTStack     stack = NULL;
    1827              : 
    1828              :     for (;;)
    1829              :     {
    1830         5842 :         page = BufferGetPage(leafbuf);
    1831         5842 :         opaque = BTPageGetOpaque(page);
    1832              : 
    1833              :         /*
    1834              :          * Internal pages are never deleted directly, only as part of deleting
    1835              :          * the whole subtree all the way down to leaf level.
    1836              :          *
    1837              :          * Also check for deleted pages here.  Caller never passes us a fully
    1838              :          * deleted page.  Only VACUUM can delete pages, so there can't have
    1839              :          * been a concurrent deletion.  Assume that we reached any deleted
    1840              :          * page encountered here by following a sibling link, and that the
    1841              :          * index is corrupt.
    1842              :          */
    1843              :         Assert(!P_ISDELETED(opaque));
    1844         5842 :         if (!P_ISLEAF(opaque) || P_ISDELETED(opaque))
    1845              :         {
    1846              :             /*
    1847              :              * Pre-9.4 page deletion only marked internal pages as half-dead,
    1848              :              * but now we only use that flag on leaf pages. The old algorithm
    1849              :              * was never supposed to leave half-dead pages in the tree, it was
    1850              :              * just a transient state, but it was nevertheless possible in
    1851              :              * error scenarios. We don't know how to deal with them here. They
    1852              :              * are harmless as far as searches are considered, but inserts
    1853              :              * into the deleted keyspace could add out-of-order downlinks in
    1854              :              * the upper levels. Log a notice, hopefully the admin will notice
    1855              :              * and reindex.
    1856              :              */
    1857            0 :             if (P_ISHALFDEAD(opaque))
    1858            0 :                 ereport(LOG,
    1859              :                         (errcode(ERRCODE_INDEX_CORRUPTED),
    1860              :                          errmsg("index \"%s\" contains a half-dead internal page",
    1861              :                                 RelationGetRelationName(rel)),
    1862              :                          errhint("This can be caused by an interrupted VACUUM in version 9.3 or older, before upgrade. Please REINDEX it.")));
    1863              : 
    1864            0 :             if (P_ISDELETED(opaque))
    1865            0 :                 ereport(LOG,
    1866              :                         (errcode(ERRCODE_INDEX_CORRUPTED),
    1867              :                          errmsg_internal("found deleted block %u while following right link from block %u in index \"%s\"",
    1868              :                                          BufferGetBlockNumber(leafbuf),
    1869              :                                          scanblkno,
    1870              :                                          RelationGetRelationName(rel))));
    1871              : 
    1872            0 :             _bt_relbuf(rel, leafbuf);
    1873          100 :             return;
    1874              :         }
    1875              : 
    1876              :         /*
    1877              :          * We can never delete rightmost pages nor root pages.  While at it,
    1878              :          * check that page is empty, since it's possible that the leafbuf page
    1879              :          * was empty a moment ago, but has since had some inserts.
    1880              :          *
    1881              :          * To keep the algorithm simple, we also never delete an incompletely
    1882              :          * split page (they should be rare enough that this doesn't make any
    1883              :          * meaningful difference to disk usage):
    1884              :          *
    1885              :          * The INCOMPLETE_SPLIT flag on the page tells us if the page is the
    1886              :          * left half of an incomplete split, but ensuring that it's not the
    1887              :          * right half is more complicated.  For that, we have to check that
    1888              :          * the left sibling doesn't have its INCOMPLETE_SPLIT flag set using
    1889              :          * _bt_leftsib_splitflag().  On the first iteration, we temporarily
    1890              :          * release the lock on scanblkno/leafbuf, check the left sibling, and
    1891              :          * construct a search stack to scanblkno.  On subsequent iterations,
    1892              :          * we know we stepped right from a page that passed these tests, so
    1893              :          * it's OK.
    1894              :          */
    1895         5842 :         if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) ||
    1896         5743 :             P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page) ||
    1897         5743 :             P_INCOMPLETE_SPLIT(opaque))
    1898              :         {
    1899              :             /* Should never fail to delete a half-dead page */
    1900              :             Assert(!P_ISHALFDEAD(opaque));
    1901              : 
    1902           99 :             _bt_relbuf(rel, leafbuf);
    1903           99 :             return;
    1904              :         }
    1905              : 
    1906              :         /*
    1907              :          * First, remove downlink pointing to the page (or a parent of the
    1908              :          * page, if we are going to delete a taller subtree), and mark the
    1909              :          * leafbuf page half-dead
    1910              :          */
    1911         5743 :         if (!P_ISHALFDEAD(opaque))
    1912              :         {
    1913              :             /*
    1914              :              * We need an approximate pointer to the page's parent page.  We
    1915              :              * use a variant of the standard search mechanism to search for
    1916              :              * the page's high key; this will give us a link to either the
    1917              :              * current parent or someplace to its left (if there are multiple
    1918              :              * equal high keys, which is possible with !heapkeyspace indexes).
    1919              :              *
    1920              :              * Also check if this is the right-half of an incomplete split
    1921              :              * (see comment above).
    1922              :              */
    1923         5743 :             if (!stack)
    1924         2871 :             {
    1925              :                 BTScanInsert itup_key;
    1926              :                 ItemId      itemid;
    1927              :                 IndexTuple  targetkey;
    1928              :                 BlockNumber leftsib,
    1929              :                             leafblkno;
    1930              :                 Buffer      sleafbuf;
    1931              : 
    1932         2871 :                 itemid = PageGetItemId(page, P_HIKEY);
    1933         2871 :                 targetkey = CopyIndexTuple((IndexTuple) PageGetItem(page, itemid));
    1934              : 
    1935         2871 :                 leftsib = opaque->btpo_prev;
    1936         2871 :                 leafblkno = BufferGetBlockNumber(leafbuf);
    1937              : 
    1938              :                 /*
    1939              :                  * To avoid deadlocks, we'd better drop the leaf page lock
    1940              :                  * before going further.
    1941              :                  */
    1942         2871 :                 _bt_unlockbuf(rel, leafbuf);
    1943              : 
    1944              :                 /*
    1945              :                  * Check that the left sibling of leafbuf (if any) is not
    1946              :                  * marked with INCOMPLETE_SPLIT flag before proceeding
    1947              :                  */
    1948              :                 Assert(leafblkno == scanblkno);
    1949         2871 :                 if (_bt_leftsib_splitflag(rel, leftsib, leafblkno))
    1950              :                 {
    1951            0 :                     ReleaseBuffer(leafbuf);
    1952            0 :                     return;
    1953              :                 }
    1954              : 
    1955              :                 /*
    1956              :                  * We need an insertion scan key, so build one.
    1957              :                  *
    1958              :                  * _bt_search searches for the leaf page that contains any
    1959              :                  * matching non-pivot tuples, but we need it to "search" for
    1960              :                  * the high key pivot from the page that we're set to delete.
    1961              :                  * Compensate for the mismatch by having _bt_search locate the
    1962              :                  * last position < equal-to-untruncated-prefix non-pivots.
    1963              :                  */
    1964         2871 :                 itup_key = _bt_mkscankey(rel, targetkey);
    1965              : 
    1966              :                 /* Set up a BTLessStrategyNumber-like insertion scan key */
    1967         2871 :                 itup_key->nextkey = false;
    1968         2871 :                 itup_key->backward = true;
    1969         2871 :                 stack = _bt_search(rel, NULL, itup_key, &sleafbuf, BT_READ);
    1970              :                 /* won't need a second lock or pin on leafbuf */
    1971         2871 :                 _bt_relbuf(rel, sleafbuf);
    1972              : 
    1973              :                 /*
    1974              :                  * Re-lock the leaf page, and start over to use our stack
    1975              :                  * within _bt_mark_page_halfdead.  We must do it that way
    1976              :                  * because it's possible that leafbuf can no longer be
    1977              :                  * deleted.  We need to recheck.
    1978              :                  *
    1979              :                  * Note: We can't simply hold on to the sleafbuf lock instead,
    1980              :                  * because it's barely possible that sleafbuf is not the same
    1981              :                  * page as leafbuf.  This happens when leafbuf split after our
    1982              :                  * original lock was dropped, but before _bt_search finished
    1983              :                  * its descent.  We rely on the assumption that we'll find
    1984              :                  * leafbuf isn't safe to delete anymore in this scenario.
    1985              :                  * (Page deletion can cope with the stack being to the left of
    1986              :                  * leafbuf, but not to the right of leafbuf.)
    1987              :                  */
    1988         2871 :                 _bt_lockbuf(rel, leafbuf, BT_WRITE);
    1989         2871 :                 continue;
    1990              :             }
    1991              : 
    1992              :             /*
    1993              :              * See if it's safe to delete the leaf page, and determine how
    1994              :              * many parent/internal pages above the leaf level will be
    1995              :              * deleted.  If it's safe then _bt_mark_page_halfdead will also
    1996              :              * perform the first phase of deletion, which includes marking the
    1997              :              * leafbuf page half-dead.
    1998              :              */
    1999              :             Assert(P_ISLEAF(opaque) && !P_IGNORE(opaque));
    2000         2872 :             if (!_bt_mark_page_halfdead(rel, vstate->info->heaprel, leafbuf,
    2001              :                                         stack))
    2002              :             {
    2003            1 :                 _bt_relbuf(rel, leafbuf);
    2004            1 :                 return;
    2005              :             }
    2006              :         }
    2007              :         else
    2008              :         {
    2009            0 :             INJECTION_POINT("nbtree-finish-half-dead-page-vacuum", NULL);
    2010              :         }
    2011              : 
    2012              :         /*
    2013              :          * Then unlink it from its siblings.  Each call to
    2014              :          * _bt_unlink_halfdead_page unlinks the topmost page from the subtree,
    2015              :          * making it shallower.  Iterate until the leafbuf page is deleted.
    2016              :          */
    2017         2871 :         rightsib_empty = false;
    2018              :         Assert(P_ISLEAF(opaque) && P_ISHALFDEAD(opaque));
    2019         5828 :         while (P_ISHALFDEAD(opaque))
    2020              :         {
    2021              :             /* Check for interrupts in _bt_unlink_halfdead_page */
    2022         2957 :             if (!_bt_unlink_halfdead_page(rel, leafbuf, scanblkno,
    2023              :                                           &rightsib_empty, vstate))
    2024              :             {
    2025              :                 /*
    2026              :                  * _bt_unlink_halfdead_page should never fail, since we
    2027              :                  * established that deletion is generally safe in
    2028              :                  * _bt_mark_page_halfdead -- index must be corrupt.
    2029              :                  *
    2030              :                  * Note that _bt_unlink_halfdead_page already released the
    2031              :                  * lock and pin on leafbuf for us.
    2032              :                  */
    2033              :                 Assert(false);
    2034            0 :                 return;
    2035              :             }
    2036              :         }
    2037              : 
    2038              :         Assert(P_ISLEAF(opaque) && P_ISDELETED(opaque));
    2039              : 
    2040         2871 :         rightsib = opaque->btpo_next;
    2041              : 
    2042         2871 :         _bt_relbuf(rel, leafbuf);
    2043              : 
    2044              :         /*
    2045              :          * Check here, as calling loops will have locks held, preventing
    2046              :          * interrupts from being processed.
    2047              :          */
    2048         2871 :         CHECK_FOR_INTERRUPTS();
    2049              : 
    2050              :         /*
    2051              :          * The page has now been deleted. If its right sibling is completely
    2052              :          * empty, it's possible that the reason we haven't deleted it earlier
    2053              :          * is that it was the rightmost child of the parent. Now that we
    2054              :          * removed the downlink for this page, the right sibling might now be
    2055              :          * the only child of the parent, and could be removed. It would be
    2056              :          * picked up by the next vacuum anyway, but might as well try to
    2057              :          * remove it now, so loop back to process the right sibling.
    2058              :          *
    2059              :          * Note: This relies on the assumption that _bt_getstackbuf() will be
    2060              :          * able to reuse our original descent stack with a different child
    2061              :          * block (provided that the child block is to the right of the
    2062              :          * original leaf page reached by _bt_search()). It will even update
    2063              :          * the descent stack each time we loop around, avoiding repeated work.
    2064              :          */
    2065         2871 :         if (!rightsib_empty)
    2066         2870 :             break;
    2067              : 
    2068            1 :         leafbuf = _bt_getbuf(rel, rightsib, BT_WRITE);
    2069              :     }
    2070              : }
    2071              : 
    2072              : /*
    2073              :  * First stage of page deletion.
    2074              :  *
    2075              :  * Establish the height of the to-be-deleted subtree with leafbuf at its
    2076              :  * lowest level, remove the downlink to the subtree, and mark leafbuf
    2077              :  * half-dead.  The final to-be-deleted subtree is usually just leafbuf itself,
    2078              :  * but may include additional internal pages (at most one per level of the
    2079              :  * tree below the root).
    2080              :  *
    2081              :  * Caller must pass a valid heaprel, since it's just about possible that our
    2082              :  * call to _bt_lock_subtree_parent will need to allocate a new index page to
    2083              :  * complete a page split.  Every call to _bt_allocbuf needs to pass a heaprel.
    2084              :  *
    2085              :  * Returns 'false' if leafbuf is unsafe to delete, usually because leafbuf is
    2086              :  * the rightmost child of its parent (and parent has more than one downlink).
    2087              :  * Returns 'true' when the first stage of page deletion completed
    2088              :  * successfully.
    2089              :  */
    2090              : static bool
    2091         2872 : _bt_mark_page_halfdead(Relation rel, Relation heaprel, Buffer leafbuf,
    2092              :                        BTStack stack)
    2093              : {
    2094              :     BlockNumber leafblkno;
    2095              :     BlockNumber leafrightsib;
    2096              :     BlockNumber topparent;
    2097              :     BlockNumber topparentrightsib;
    2098              :     ItemId      itemid;
    2099              :     Page        page;
    2100              :     BTPageOpaque opaque;
    2101              :     Buffer      subtreeparent;
    2102              :     OffsetNumber poffset;
    2103              :     OffsetNumber nextoffset;
    2104              :     IndexTuple  itup;
    2105              :     IndexTupleData trunctuple;
    2106              : 
    2107         2872 :     page = BufferGetPage(leafbuf);
    2108         2872 :     opaque = BTPageGetOpaque(page);
    2109              : 
    2110              :     Assert(!P_RIGHTMOST(opaque) && !P_ISROOT(opaque) &&
    2111              :            P_ISLEAF(opaque) && !P_IGNORE(opaque) &&
    2112              :            P_FIRSTDATAKEY(opaque) > PageGetMaxOffsetNumber(page));
    2113              :     Assert(heaprel != NULL);
    2114              : 
    2115              :     /*
    2116              :      * Save info about the leaf page.
    2117              :      */
    2118         2872 :     leafblkno = BufferGetBlockNumber(leafbuf);
    2119         2872 :     leafrightsib = opaque->btpo_next;
    2120              : 
    2121              :     /*
    2122              :      * Before attempting to lock the parent page, check that the right sibling
    2123              :      * is not in half-dead state.  A half-dead right sibling would have no
    2124              :      * downlink in the parent, which would be highly confusing later when we
    2125              :      * delete the downlink.  It would fail the "right sibling of target page
    2126              :      * is also the next child in parent page" cross-check below.
    2127              :      */
    2128         2872 :     if (_bt_rightsib_halfdeadflag(rel, leafrightsib))
    2129              :     {
    2130            0 :         elog(DEBUG1, "could not delete page %u because its right sibling %u is half-dead",
    2131              :              leafblkno, leafrightsib);
    2132            0 :         return false;
    2133              :     }
    2134              : 
    2135              :     /*
    2136              :      * We cannot delete a page that is the rightmost child of its immediate
    2137              :      * parent, unless it is the only child --- in which case the parent has to
    2138              :      * be deleted too, and the same condition applies recursively to it. We
    2139              :      * have to check this condition all the way up before trying to delete,
    2140              :      * and lock the parent of the root of the to-be-deleted subtree (the
    2141              :      * "subtree parent").  _bt_lock_subtree_parent() locks the subtree parent
    2142              :      * for us.  We remove the downlink to the "top parent" page (subtree root
    2143              :      * page) from the subtree parent page below.
    2144              :      *
    2145              :      * Initialize topparent to be leafbuf page now.  The final to-be-deleted
    2146              :      * subtree is often a degenerate one page subtree consisting only of the
    2147              :      * leafbuf page.  When that happens, the leafbuf page is the final subtree
    2148              :      * root page/top parent page.
    2149              :      */
    2150         2872 :     topparent = leafblkno;
    2151         2872 :     topparentrightsib = leafrightsib;
    2152         2872 :     if (!_bt_lock_subtree_parent(rel, heaprel, leafblkno, stack,
    2153              :                                  &subtreeparent, &poffset,
    2154              :                                  &topparent, &topparentrightsib))
    2155            1 :         return false;
    2156              : 
    2157         2871 :     page = BufferGetPage(subtreeparent);
    2158         2871 :     opaque = BTPageGetOpaque(page);
    2159              : 
    2160              : #ifdef USE_ASSERT_CHECKING
    2161              : 
    2162              :     /*
    2163              :      * This is just an assertion because _bt_lock_subtree_parent should have
    2164              :      * guaranteed tuple has the expected contents
    2165              :      */
    2166              :     itemid = PageGetItemId(page, poffset);
    2167              :     itup = (IndexTuple) PageGetItem(page, itemid);
    2168              :     Assert(BTreeTupleGetDownLink(itup) == topparent);
    2169              : #endif
    2170              : 
    2171         2871 :     nextoffset = OffsetNumberNext(poffset);
    2172         2871 :     itemid = PageGetItemId(page, nextoffset);
    2173         2871 :     itup = (IndexTuple) PageGetItem(page, itemid);
    2174              : 
    2175              :     /*
    2176              :      * Check that the parent-page index items we're about to delete/overwrite
    2177              :      * in subtree parent page contain what we expect.  This can fail if the
    2178              :      * index has become corrupt for some reason.  When that happens we back
    2179              :      * out of deletion of the leafbuf subtree.  (This is just like the case
    2180              :      * where _bt_lock_subtree_parent() cannot "re-find" leafbuf's downlink.)
    2181              :      */
    2182         2871 :     if (BTreeTupleGetDownLink(itup) != topparentrightsib)
    2183              :     {
    2184            0 :         ereport(LOG,
    2185              :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    2186              :                  errmsg_internal("right sibling %u of block %u is not next child %u of block %u in index \"%s\"",
    2187              :                                  topparentrightsib, topparent,
    2188              :                                  BTreeTupleGetDownLink(itup),
    2189              :                                  BufferGetBlockNumber(subtreeparent),
    2190              :                                  RelationGetRelationName(rel))));
    2191              : 
    2192            0 :         _bt_relbuf(rel, subtreeparent);
    2193              :         Assert(false);
    2194            0 :         return false;
    2195              :     }
    2196              : 
    2197              :     /*
    2198              :      * Any insert which would have gone on the leaf block will now go to its
    2199              :      * right sibling.  In other words, the key space moves right.
    2200              :      */
    2201         2871 :     PredicateLockPageCombine(rel, leafblkno, leafrightsib);
    2202              : 
    2203              :     /* No ereport(ERROR) until changes are logged */
    2204         2871 :     START_CRIT_SECTION();
    2205              : 
    2206              :     /*
    2207              :      * Update parent of subtree.  We want to delete the downlink to the top
    2208              :      * parent page/root of the subtree, and the *following* key.  Easiest way
    2209              :      * is to copy the right sibling's downlink over the downlink that points
    2210              :      * to top parent page, and then delete the right sibling's original pivot
    2211              :      * tuple.
    2212              :      *
    2213              :      * Lanin and Shasha make the key space move left when deleting a page,
    2214              :      * whereas the key space moves right here.  That's why we cannot simply
    2215              :      * delete the pivot tuple with the downlink to the top parent page.  See
    2216              :      * nbtree/README.
    2217              :      */
    2218         2871 :     page = BufferGetPage(subtreeparent);
    2219         2871 :     opaque = BTPageGetOpaque(page);
    2220              : 
    2221         2871 :     itemid = PageGetItemId(page, poffset);
    2222         2871 :     itup = (IndexTuple) PageGetItem(page, itemid);
    2223         2871 :     BTreeTupleSetDownLink(itup, topparentrightsib);
    2224              : 
    2225         2871 :     nextoffset = OffsetNumberNext(poffset);
    2226         2871 :     PageIndexTupleDelete(page, nextoffset);
    2227              : 
    2228              :     /*
    2229              :      * Mark the leaf page as half-dead, and stamp it with a link to the top
    2230              :      * parent page.  When the leaf page is also the top parent page, the link
    2231              :      * is set to InvalidBlockNumber.
    2232              :      */
    2233         2871 :     page = BufferGetPage(leafbuf);
    2234         2871 :     opaque = BTPageGetOpaque(page);
    2235         2871 :     opaque->btpo_flags |= BTP_HALF_DEAD;
    2236              : 
    2237              :     Assert(PageGetMaxOffsetNumber(page) == P_HIKEY);
    2238         2871 :     MemSet(&trunctuple, 0, sizeof(IndexTupleData));
    2239         2871 :     trunctuple.t_info = sizeof(IndexTupleData);
    2240         2871 :     if (topparent != leafblkno)
    2241           46 :         BTreeTupleSetTopParent(&trunctuple, topparent);
    2242              :     else
    2243         2825 :         BTreeTupleSetTopParent(&trunctuple, InvalidBlockNumber);
    2244              : 
    2245         2871 :     if (!PageIndexTupleOverwrite(page, P_HIKEY, &trunctuple, IndexTupleSize(&trunctuple)))
    2246            0 :         elog(ERROR, "could not overwrite high key in half-dead page");
    2247              : 
    2248              :     /* Must mark buffers dirty before XLogInsert */
    2249         2871 :     MarkBufferDirty(subtreeparent);
    2250         2871 :     MarkBufferDirty(leafbuf);
    2251              : 
    2252              :     /* XLOG stuff */
    2253         2871 :     if (RelationNeedsWAL(rel))
    2254              :     {
    2255              :         xl_btree_mark_page_halfdead xlrec;
    2256              :         XLogRecPtr  recptr;
    2257              : 
    2258         2871 :         xlrec.poffset = poffset;
    2259         2871 :         xlrec.leafblk = leafblkno;
    2260         2871 :         if (topparent != leafblkno)
    2261           46 :             xlrec.topparent = topparent;
    2262              :         else
    2263         2825 :             xlrec.topparent = InvalidBlockNumber;
    2264              : 
    2265         2871 :         XLogBeginInsert();
    2266         2871 :         XLogRegisterBuffer(0, leafbuf, REGBUF_WILL_INIT);
    2267         2871 :         XLogRegisterBuffer(1, subtreeparent, REGBUF_STANDARD);
    2268              : 
    2269         2871 :         page = BufferGetPage(leafbuf);
    2270         2871 :         opaque = BTPageGetOpaque(page);
    2271         2871 :         xlrec.leftblk = opaque->btpo_prev;
    2272         2871 :         xlrec.rightblk = opaque->btpo_next;
    2273              : 
    2274         2871 :         XLogRegisterData(&xlrec, SizeOfBtreeMarkPageHalfDead);
    2275              : 
    2276         2871 :         recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_MARK_PAGE_HALFDEAD);
    2277              : 
    2278         2871 :         page = BufferGetPage(subtreeparent);
    2279         2871 :         PageSetLSN(page, recptr);
    2280         2871 :         page = BufferGetPage(leafbuf);
    2281         2871 :         PageSetLSN(page, recptr);
    2282              :     }
    2283              : 
    2284         2871 :     END_CRIT_SECTION();
    2285              : 
    2286         2871 :     _bt_relbuf(rel, subtreeparent);
    2287         2871 :     return true;
    2288              : }
    2289              : 
    2290              : /*
    2291              :  * Second stage of page deletion.
    2292              :  *
    2293              :  * Unlinks a single page (in the subtree undergoing deletion) from its
    2294              :  * siblings.  Also marks the page deleted.
    2295              :  *
    2296              :  * To get rid of the whole subtree, including the leaf page itself, call here
    2297              :  * until the leaf page is deleted.  The original "top parent" established in
    2298              :  * the first stage of deletion is deleted in the first call here, while the
    2299              :  * leaf page is deleted in the last call here.  Note that the leaf page itself
    2300              :  * is often the initial top parent page.
    2301              :  *
    2302              :  * Returns 'false' if the page could not be unlinked (shouldn't happen).  If
    2303              :  * the right sibling of the current target page is empty, *rightsib_empty is
    2304              :  * set to true, allowing caller to delete the target's right sibling page in
    2305              :  * passing.  Note that *rightsib_empty is only actually used by caller when
    2306              :  * target page is leafbuf, following last call here for leafbuf/the subtree
    2307              :  * containing leafbuf.  (We always set *rightsib_empty for caller, just to be
    2308              :  * consistent.)
    2309              :  *
    2310              :  * Must hold pin and lock on leafbuf at entry (read or write doesn't matter).
    2311              :  * On success exit, we'll be holding pin and write lock.  On failure exit,
    2312              :  * we'll release both pin and lock before returning (we define it that way
    2313              :  * to avoid having to reacquire a lock we already released).
    2314              :  */
    2315              : static bool
    2316         2957 : _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, BlockNumber scanblkno,
    2317              :                          bool *rightsib_empty, BTVacState *vstate)
    2318              : {
    2319         2957 :     BlockNumber leafblkno = BufferGetBlockNumber(leafbuf);
    2320         2957 :     IndexBulkDeleteResult *stats = vstate->stats;
    2321              :     BlockNumber leafleftsib;
    2322              :     BlockNumber leafrightsib;
    2323              :     BlockNumber target;
    2324              :     BlockNumber leftsib;
    2325              :     BlockNumber rightsib;
    2326         2957 :     Buffer      lbuf = InvalidBuffer;
    2327              :     Buffer      buf;
    2328              :     Buffer      rbuf;
    2329         2957 :     Buffer      metabuf = InvalidBuffer;
    2330         2957 :     Page        metapg = NULL;
    2331         2957 :     BTMetaPageData *metad = NULL;
    2332              :     ItemId      itemid;
    2333              :     Page        page;
    2334              :     BTPageOpaque opaque;
    2335              :     FullTransactionId safexid;
    2336              :     bool        rightsib_is_rightmost;
    2337              :     uint32      targetlevel;
    2338              :     IndexTuple  leafhikey;
    2339              :     BlockNumber leaftopparent;
    2340              : 
    2341         2957 :     page = BufferGetPage(leafbuf);
    2342         2957 :     opaque = BTPageGetOpaque(page);
    2343              : 
    2344              :     Assert(P_ISLEAF(opaque) && !P_ISDELETED(opaque) && P_ISHALFDEAD(opaque));
    2345              : 
    2346              :     /*
    2347              :      * Remember some information about the leaf page.
    2348              :      */
    2349         2957 :     itemid = PageGetItemId(page, P_HIKEY);
    2350         2957 :     leafhikey = (IndexTuple) PageGetItem(page, itemid);
    2351         2957 :     target = BTreeTupleGetTopParent(leafhikey);
    2352         2957 :     leafleftsib = opaque->btpo_prev;
    2353         2957 :     leafrightsib = opaque->btpo_next;
    2354              : 
    2355         2957 :     _bt_unlockbuf(rel, leafbuf);
    2356              : 
    2357         2957 :     INJECTION_POINT("nbtree-leave-page-half-dead", NULL);
    2358              : 
    2359              :     /*
    2360              :      * Check here, as calling loops will have locks held, preventing
    2361              :      * interrupts from being processed.
    2362              :      */
    2363         2957 :     CHECK_FOR_INTERRUPTS();
    2364              : 
    2365              :     /* Unlink the current top parent of the subtree */
    2366         2957 :     if (!BlockNumberIsValid(target))
    2367              :     {
    2368              :         /* Target is leaf page (or leaf page is top parent, if you prefer) */
    2369         2871 :         target = leafblkno;
    2370              : 
    2371         2871 :         buf = leafbuf;
    2372         2871 :         leftsib = leafleftsib;
    2373         2871 :         targetlevel = 0;
    2374              :     }
    2375              :     else
    2376              :     {
    2377              :         /* Target is the internal page taken from leaf's top parent link */
    2378              :         Assert(target != leafblkno);
    2379              : 
    2380              :         /* Fetch the block number of the target's left sibling */
    2381           86 :         buf = _bt_getbuf(rel, target, BT_READ);
    2382           86 :         page = BufferGetPage(buf);
    2383           86 :         opaque = BTPageGetOpaque(page);
    2384           86 :         leftsib = opaque->btpo_prev;
    2385           86 :         targetlevel = opaque->btpo_level;
    2386              :         Assert(targetlevel > 0);
    2387              : 
    2388              :         /*
    2389              :          * To avoid deadlocks, we'd better drop the target page lock before
    2390              :          * going further.
    2391              :          */
    2392           86 :         _bt_unlockbuf(rel, buf);
    2393              :     }
    2394              : 
    2395              :     /*
    2396              :      * We have to lock the pages we need to modify in the standard order:
    2397              :      * moving right, then up.  Else we will deadlock against other writers.
    2398              :      *
    2399              :      * So, first lock the leaf page, if it's not the target.  Then find and
    2400              :      * write-lock the current left sibling of the target page.  The sibling
    2401              :      * that was current a moment ago could have split, so we may have to move
    2402              :      * right.
    2403              :      */
    2404         2957 :     if (target != leafblkno)
    2405           86 :         _bt_lockbuf(rel, leafbuf, BT_WRITE);
    2406         2957 :     if (leftsib != P_NONE)
    2407              :     {
    2408          687 :         lbuf = _bt_getbuf(rel, leftsib, BT_WRITE);
    2409          687 :         page = BufferGetPage(lbuf);
    2410          687 :         opaque = BTPageGetOpaque(page);
    2411          687 :         while (P_ISDELETED(opaque) || opaque->btpo_next != target)
    2412              :         {
    2413            0 :             bool        leftsibvalid = true;
    2414              : 
    2415              :             /*
    2416              :              * Before we follow the link from the page that was the left
    2417              :              * sibling mere moments ago, validate its right link.  This
    2418              :              * reduces the opportunities for loop to fail to ever make any
    2419              :              * progress in the presence of index corruption.
    2420              :              *
    2421              :              * Note: we rely on the assumption that there can only be one
    2422              :              * vacuum process running at a time (against the same index).
    2423              :              */
    2424            0 :             if (P_RIGHTMOST(opaque) || P_ISDELETED(opaque) ||
    2425            0 :                 leftsib == opaque->btpo_next)
    2426            0 :                 leftsibvalid = false;
    2427              : 
    2428            0 :             leftsib = opaque->btpo_next;
    2429            0 :             _bt_relbuf(rel, lbuf);
    2430              : 
    2431            0 :             if (!leftsibvalid)
    2432              :             {
    2433              :                 /*
    2434              :                  * This is known to fail in the field; sibling link corruption
    2435              :                  * is relatively common.  Press on with vacuuming rather than
    2436              :                  * just throwing an ERROR.
    2437              :                  */
    2438            0 :                 ereport(LOG,
    2439              :                         (errcode(ERRCODE_INDEX_CORRUPTED),
    2440              :                          errmsg_internal("valid left sibling for deletion target could not be located: "
    2441              :                                          "left sibling %u of target %u with leafblkno %u and scanblkno %u on level %u of index \"%s\"",
    2442              :                                          leftsib, target, leafblkno, scanblkno,
    2443              :                                          targetlevel, RelationGetRelationName(rel))));
    2444              : 
    2445              :                 /* Must release all pins and locks on failure exit */
    2446            0 :                 ReleaseBuffer(buf);
    2447            0 :                 if (target != leafblkno)
    2448            0 :                     _bt_relbuf(rel, leafbuf);
    2449              : 
    2450            0 :                 return false;
    2451              :             }
    2452              : 
    2453            0 :             CHECK_FOR_INTERRUPTS();
    2454              : 
    2455              :             /* step right one page */
    2456            0 :             lbuf = _bt_getbuf(rel, leftsib, BT_WRITE);
    2457            0 :             page = BufferGetPage(lbuf);
    2458            0 :             opaque = BTPageGetOpaque(page);
    2459              :         }
    2460              :     }
    2461              :     else
    2462         2270 :         lbuf = InvalidBuffer;
    2463              : 
    2464              :     /* Next write-lock the target page itself */
    2465         2957 :     _bt_lockbuf(rel, buf, BT_WRITE);
    2466         2957 :     page = BufferGetPage(buf);
    2467         2957 :     opaque = BTPageGetOpaque(page);
    2468              : 
    2469              :     /*
    2470              :      * Check page is still empty etc, else abandon deletion.  This is just for
    2471              :      * paranoia's sake; a half-dead page cannot resurrect because there can be
    2472              :      * only one vacuum process running at a time.
    2473              :      */
    2474         2957 :     if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque))
    2475            0 :         elog(ERROR, "target page changed status unexpectedly in block %u of index \"%s\"",
    2476              :              target, RelationGetRelationName(rel));
    2477              : 
    2478         2957 :     if (opaque->btpo_prev != leftsib)
    2479            0 :         ereport(ERROR,
    2480              :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    2481              :                  errmsg_internal("target page left link unexpectedly changed from %u to %u in block %u of index \"%s\"",
    2482              :                                  leftsib, opaque->btpo_prev, target,
    2483              :                                  RelationGetRelationName(rel))));
    2484              : 
    2485         2957 :     if (target == leafblkno)
    2486              :     {
    2487         2871 :         if (P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page) ||
    2488         2871 :             !P_ISLEAF(opaque) || !P_ISHALFDEAD(opaque))
    2489            0 :             elog(ERROR, "target leaf page changed status unexpectedly in block %u of index \"%s\"",
    2490              :                  target, RelationGetRelationName(rel));
    2491              : 
    2492              :         /* Leaf page is also target page: don't set leaftopparent */
    2493         2871 :         leaftopparent = InvalidBlockNumber;
    2494              :     }
    2495              :     else
    2496              :     {
    2497              :         IndexTuple  finaldataitem;
    2498              : 
    2499           86 :         if (P_FIRSTDATAKEY(opaque) != PageGetMaxOffsetNumber(page) ||
    2500           86 :             P_ISLEAF(opaque))
    2501            0 :             elog(ERROR, "target internal page on level %u changed status unexpectedly in block %u of index \"%s\"",
    2502              :                  targetlevel, target, RelationGetRelationName(rel));
    2503              : 
    2504              :         /* Target is internal: set leaftopparent for next call here...  */
    2505           86 :         itemid = PageGetItemId(page, P_FIRSTDATAKEY(opaque));
    2506           86 :         finaldataitem = (IndexTuple) PageGetItem(page, itemid);
    2507           86 :         leaftopparent = BTreeTupleGetDownLink(finaldataitem);
    2508              :         /* ...except when it would be a redundant pointer-to-self */
    2509           86 :         if (leaftopparent == leafblkno)
    2510           46 :             leaftopparent = InvalidBlockNumber;
    2511              :     }
    2512              : 
    2513              :     /* No leaftopparent for level 0 (leaf page) or level 1 target */
    2514              :     Assert(!BlockNumberIsValid(leaftopparent) || targetlevel > 1);
    2515              : 
    2516              :     /*
    2517              :      * And next write-lock the (current) right sibling.
    2518              :      */
    2519         2957 :     rightsib = opaque->btpo_next;
    2520         2957 :     rbuf = _bt_getbuf(rel, rightsib, BT_WRITE);
    2521         2957 :     page = BufferGetPage(rbuf);
    2522         2957 :     opaque = BTPageGetOpaque(page);
    2523              : 
    2524              :     /*
    2525              :      * Validate target's right sibling page.  Its left link must point back to
    2526              :      * the target page.
    2527              :      */
    2528         2957 :     if (opaque->btpo_prev != target)
    2529              :     {
    2530              :         /*
    2531              :          * This is known to fail in the field; sibling link corruption is
    2532              :          * relatively common.  Press on with vacuuming rather than just
    2533              :          * throwing an ERROR (same approach used for left-sibling's-right-link
    2534              :          * validation check a moment ago).
    2535              :          */
    2536            0 :         ereport(LOG,
    2537              :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    2538              :                  errmsg_internal("right sibling's left-link doesn't match: "
    2539              :                                  "right sibling %u of target %u with leafblkno %u "
    2540              :                                  "and scanblkno %u spuriously links to non-target %u "
    2541              :                                  "on level %u of index \"%s\"",
    2542              :                                  rightsib, target, leafblkno,
    2543              :                                  scanblkno, opaque->btpo_prev,
    2544              :                                  targetlevel, RelationGetRelationName(rel))));
    2545              : 
    2546              :         /* Must release all pins and locks on failure exit */
    2547            0 :         if (BufferIsValid(lbuf))
    2548            0 :             _bt_relbuf(rel, lbuf);
    2549            0 :         _bt_relbuf(rel, rbuf);
    2550            0 :         _bt_relbuf(rel, buf);
    2551            0 :         if (target != leafblkno)
    2552            0 :             _bt_relbuf(rel, leafbuf);
    2553              : 
    2554            0 :         return false;
    2555              :     }
    2556              : 
    2557         2957 :     rightsib_is_rightmost = P_RIGHTMOST(opaque);
    2558         2957 :     *rightsib_empty = (P_FIRSTDATAKEY(opaque) > PageGetMaxOffsetNumber(page));
    2559              : 
    2560              :     /*
    2561              :      * If we are deleting the next-to-last page on the target's level, then
    2562              :      * the rightsib is a candidate to become the new fast root. (In theory, it
    2563              :      * might be possible to push the fast root even further down, but the odds
    2564              :      * of doing so are slim, and the locking considerations daunting.)
    2565              :      *
    2566              :      * We can safely acquire a lock on the metapage here --- see comments for
    2567              :      * _bt_newlevel().
    2568              :      */
    2569         2957 :     if (leftsib == P_NONE && rightsib_is_rightmost)
    2570              :     {
    2571           28 :         page = BufferGetPage(rbuf);
    2572           28 :         opaque = BTPageGetOpaque(page);
    2573           28 :         if (P_RIGHTMOST(opaque))
    2574              :         {
    2575              :             /* rightsib will be the only one left on the level */
    2576           28 :             metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
    2577           28 :             metapg = BufferGetPage(metabuf);
    2578           28 :             metad = BTPageGetMeta(metapg);
    2579              : 
    2580              :             /*
    2581              :              * The expected case here is btm_fastlevel == targetlevel+1; if
    2582              :              * the fastlevel is <= targetlevel, something is wrong, and we
    2583              :              * choose to overwrite it to fix it.
    2584              :              */
    2585           28 :             if (metad->btm_fastlevel > targetlevel + 1)
    2586              :             {
    2587              :                 /* no update wanted */
    2588            0 :                 _bt_relbuf(rel, metabuf);
    2589            0 :                 metabuf = InvalidBuffer;
    2590              :             }
    2591              :         }
    2592              :     }
    2593              : 
    2594              :     /*
    2595              :      * Here we begin doing the deletion.
    2596              :      */
    2597              : 
    2598              :     /* No ereport(ERROR) until changes are logged */
    2599         2957 :     START_CRIT_SECTION();
    2600              : 
    2601              :     /*
    2602              :      * Update siblings' side-links.  Note the target page's side-links will
    2603              :      * continue to point to the siblings.  Asserts here are just rechecking
    2604              :      * things we already verified above.
    2605              :      */
    2606         2957 :     if (BufferIsValid(lbuf))
    2607              :     {
    2608          687 :         page = BufferGetPage(lbuf);
    2609          687 :         opaque = BTPageGetOpaque(page);
    2610              :         Assert(opaque->btpo_next == target);
    2611          687 :         opaque->btpo_next = rightsib;
    2612              :     }
    2613         2957 :     page = BufferGetPage(rbuf);
    2614         2957 :     opaque = BTPageGetOpaque(page);
    2615              :     Assert(opaque->btpo_prev == target);
    2616         2957 :     opaque->btpo_prev = leftsib;
    2617              : 
    2618              :     /*
    2619              :      * If we deleted a parent of the targeted leaf page, instead of the leaf
    2620              :      * itself, update the leaf to point to the next remaining child in the
    2621              :      * subtree.
    2622              :      *
    2623              :      * Note: We rely on the fact that a buffer pin on the leaf page has been
    2624              :      * held since leafhikey was initialized.  This is safe, though only
    2625              :      * because the page was already half-dead at that point.  The leaf page
    2626              :      * cannot have been modified by any other backend during the period when
    2627              :      * no lock was held.
    2628              :      */
    2629         2957 :     if (target != leafblkno)
    2630           86 :         BTreeTupleSetTopParent(leafhikey, leaftopparent);
    2631              : 
    2632              :     /*
    2633              :      * Mark the page itself deleted.  It can be recycled when all current
    2634              :      * transactions are gone.  Storing GetTopTransactionId() would work, but
    2635              :      * we're in VACUUM and would not otherwise have an XID.  Having already
    2636              :      * updated links to the target, ReadNextFullTransactionId() suffices as an
    2637              :      * upper bound.  Any scan having retained a now-stale link is advertising
    2638              :      * in its PGPROC an xmin less than or equal to the value we read here.  It
    2639              :      * will continue to do so, holding back the xmin horizon, for the duration
    2640              :      * of that scan.
    2641              :      */
    2642         2957 :     page = BufferGetPage(buf);
    2643         2957 :     opaque = BTPageGetOpaque(page);
    2644              :     Assert(P_ISHALFDEAD(opaque) || !P_ISLEAF(opaque));
    2645              : 
    2646              :     /*
    2647              :      * Store upper bound XID that's used to determine when deleted page is no
    2648              :      * longer needed as a tombstone
    2649              :      */
    2650         2957 :     safexid = ReadNextFullTransactionId();
    2651         2957 :     BTPageSetDeleted(page, safexid);
    2652         2957 :     opaque->btpo_cycleid = 0;
    2653              : 
    2654              :     /* And update the metapage, if needed */
    2655         2957 :     if (BufferIsValid(metabuf))
    2656              :     {
    2657              :         /* upgrade metapage if needed */
    2658           28 :         if (metad->btm_version < BTREE_NOVAC_VERSION)
    2659            0 :             _bt_upgrademetapage(metapg);
    2660           28 :         metad->btm_fastroot = rightsib;
    2661           28 :         metad->btm_fastlevel = targetlevel;
    2662           28 :         MarkBufferDirty(metabuf);
    2663              :     }
    2664              : 
    2665              :     /* Must mark buffers dirty before XLogInsert */
    2666         2957 :     MarkBufferDirty(rbuf);
    2667         2957 :     MarkBufferDirty(buf);
    2668         2957 :     if (BufferIsValid(lbuf))
    2669          687 :         MarkBufferDirty(lbuf);
    2670         2957 :     if (target != leafblkno)
    2671           86 :         MarkBufferDirty(leafbuf);
    2672              : 
    2673              :     /* XLOG stuff */
    2674         2957 :     if (RelationNeedsWAL(rel))
    2675              :     {
    2676              :         xl_btree_unlink_page xlrec;
    2677              :         xl_btree_metadata xlmeta;
    2678              :         uint8       xlinfo;
    2679              :         XLogRecPtr  recptr;
    2680              : 
    2681         2957 :         XLogBeginInsert();
    2682              : 
    2683         2957 :         XLogRegisterBuffer(0, buf, REGBUF_WILL_INIT);
    2684         2957 :         if (BufferIsValid(lbuf))
    2685          687 :             XLogRegisterBuffer(1, lbuf, REGBUF_STANDARD);
    2686         2957 :         XLogRegisterBuffer(2, rbuf, REGBUF_STANDARD);
    2687         2957 :         if (target != leafblkno)
    2688           86 :             XLogRegisterBuffer(3, leafbuf, REGBUF_WILL_INIT);
    2689              : 
    2690              :         /* information stored on the target/to-be-unlinked block */
    2691         2957 :         xlrec.leftsib = leftsib;
    2692         2957 :         xlrec.rightsib = rightsib;
    2693         2957 :         xlrec.level = targetlevel;
    2694         2957 :         xlrec.safexid = safexid;
    2695              : 
    2696              :         /* information needed to recreate the leaf block (if not the target) */
    2697         2957 :         xlrec.leafleftsib = leafleftsib;
    2698         2957 :         xlrec.leafrightsib = leafrightsib;
    2699         2957 :         xlrec.leaftopparent = leaftopparent;
    2700              : 
    2701         2957 :         XLogRegisterData(&xlrec, SizeOfBtreeUnlinkPage);
    2702              : 
    2703         2957 :         if (BufferIsValid(metabuf))
    2704              :         {
    2705           28 :             XLogRegisterBuffer(4, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
    2706              : 
    2707              :             Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
    2708           28 :             xlmeta.version = metad->btm_version;
    2709           28 :             xlmeta.root = metad->btm_root;
    2710           28 :             xlmeta.level = metad->btm_level;
    2711           28 :             xlmeta.fastroot = metad->btm_fastroot;
    2712           28 :             xlmeta.fastlevel = metad->btm_fastlevel;
    2713           28 :             xlmeta.last_cleanup_num_delpages = metad->btm_last_cleanup_num_delpages;
    2714           28 :             xlmeta.allequalimage = metad->btm_allequalimage;
    2715              : 
    2716           28 :             XLogRegisterBufData(4, &xlmeta, sizeof(xl_btree_metadata));
    2717           28 :             xlinfo = XLOG_BTREE_UNLINK_PAGE_META;
    2718              :         }
    2719              :         else
    2720         2929 :             xlinfo = XLOG_BTREE_UNLINK_PAGE;
    2721              : 
    2722         2957 :         recptr = XLogInsert(RM_BTREE_ID, xlinfo);
    2723              : 
    2724         2957 :         if (BufferIsValid(metabuf))
    2725              :         {
    2726           28 :             PageSetLSN(metapg, recptr);
    2727              :         }
    2728         2957 :         page = BufferGetPage(rbuf);
    2729         2957 :         PageSetLSN(page, recptr);
    2730         2957 :         page = BufferGetPage(buf);
    2731         2957 :         PageSetLSN(page, recptr);
    2732         2957 :         if (BufferIsValid(lbuf))
    2733              :         {
    2734          687 :             page = BufferGetPage(lbuf);
    2735          687 :             PageSetLSN(page, recptr);
    2736              :         }
    2737         2957 :         if (target != leafblkno)
    2738              :         {
    2739           86 :             page = BufferGetPage(leafbuf);
    2740           86 :             PageSetLSN(page, recptr);
    2741              :         }
    2742              :     }
    2743              : 
    2744         2957 :     END_CRIT_SECTION();
    2745              : 
    2746              :     /* release metapage */
    2747         2957 :     if (BufferIsValid(metabuf))
    2748           28 :         _bt_relbuf(rel, metabuf);
    2749              : 
    2750              :     /* release siblings */
    2751         2957 :     if (BufferIsValid(lbuf))
    2752          687 :         _bt_relbuf(rel, lbuf);
    2753         2957 :     _bt_relbuf(rel, rbuf);
    2754              : 
    2755              :     /* If the target is not leafbuf, we're done with it now -- release it */
    2756         2957 :     if (target != leafblkno)
    2757           86 :         _bt_relbuf(rel, buf);
    2758              : 
    2759              :     /*
    2760              :      * Maintain pages_newly_deleted, which is simply the number of pages
    2761              :      * deleted by the ongoing VACUUM operation.
    2762              :      *
    2763              :      * Maintain pages_deleted in a way that takes into account how
    2764              :      * btvacuumpage() will count deleted pages that have yet to become
    2765              :      * scanblkno -- only count page when it's not going to get that treatment
    2766              :      * later on.
    2767              :      */
    2768         2957 :     stats->pages_newly_deleted++;
    2769         2957 :     if (target <= scanblkno)
    2770         2878 :         stats->pages_deleted++;
    2771              : 
    2772              :     /*
    2773              :      * Remember information about the target page (now a newly deleted page)
    2774              :      * in dedicated vstate space for later.  The page will be considered as a
    2775              :      * candidate to place in the FSM at the end of the current btvacuumscan()
    2776              :      * call.
    2777              :      */
    2778         2957 :     _bt_pendingfsm_add(vstate, target, safexid);
    2779              : 
    2780              :     /* Success - hold on to lock on leafbuf (might also have been target) */
    2781         2957 :     return true;
    2782              : }
    2783              : 
    2784              : /*
    2785              :  * Establish how tall the to-be-deleted subtree will be during the first stage
    2786              :  * of page deletion.
    2787              :  *
    2788              :  * Caller's child argument is the block number of the page caller wants to
    2789              :  * delete (this is leafbuf's block number, except when we're called
    2790              :  * recursively).  stack is a search stack leading to it.  Note that we will
    2791              :  * update the stack entry(s) to reflect current downlink positions --- this is
    2792              :  * similar to the corresponding point in page split handling.
    2793              :  *
    2794              :  * If "first stage" caller cannot go ahead with deleting _any_ pages, returns
    2795              :  * false.  Returns true on success, in which case caller can use certain
    2796              :  * details established here to perform the first stage of deletion.  This
    2797              :  * function is the last point at which page deletion may be deemed unsafe
    2798              :  * (barring index corruption, or unexpected concurrent page deletions).
    2799              :  *
    2800              :  * We write lock the parent of the root of the to-be-deleted subtree for
    2801              :  * caller on success (i.e. we leave our lock on the *subtreeparent buffer for
    2802              :  * caller).  Caller will have to remove a downlink from *subtreeparent.  We
    2803              :  * also set a *subtreeparent offset number in *poffset, to indicate the
    2804              :  * location of the pivot tuple that contains the relevant downlink.
    2805              :  *
    2806              :  * The root of the to-be-deleted subtree is called the "top parent".  Note
    2807              :  * that the leafbuf page is often the final "top parent" page (you can think
    2808              :  * of the leafbuf page as a degenerate single page subtree when that happens).
    2809              :  * Caller should initialize *topparent to the target leafbuf page block number
    2810              :  * (while *topparentrightsib should be set to leafbuf's right sibling block
    2811              :  * number).  We will update *topparent (and *topparentrightsib) for caller
    2812              :  * here, though only when it turns out that caller will delete at least one
    2813              :  * internal page (i.e. only when caller needs to store a valid link to the top
    2814              :  * parent block in the leafbuf page using BTreeTupleSetTopParent()).
    2815              :  */
    2816              : static bool
    2817         2960 : _bt_lock_subtree_parent(Relation rel, Relation heaprel, BlockNumber child,
    2818              :                         BTStack stack, Buffer *subtreeparent,
    2819              :                         OffsetNumber *poffset, BlockNumber *topparent,
    2820              :                         BlockNumber *topparentrightsib)
    2821              : {
    2822              :     BlockNumber parent,
    2823              :                 leftsibparent;
    2824              :     OffsetNumber parentoffset,
    2825              :                 maxoff;
    2826              :     Buffer      pbuf;
    2827              :     Page        page;
    2828              :     BTPageOpaque opaque;
    2829              : 
    2830              :     /*
    2831              :      * Locate the pivot tuple whose downlink points to "child".  Write lock
    2832              :      * the parent page itself.
    2833              :      */
    2834         2960 :     pbuf = _bt_getstackbuf(rel, heaprel, stack, child);
    2835         2960 :     if (pbuf == InvalidBuffer)
    2836              :     {
    2837              :         /*
    2838              :          * Failed to "re-find" a pivot tuple whose downlink matched our child
    2839              :          * block number on the parent level -- the index must be corrupt.
    2840              :          * Don't even try to delete the leafbuf subtree.  Just report the
    2841              :          * issue and press on with vacuuming the index.
    2842              :          *
    2843              :          * Note: _bt_getstackbuf() recovers from concurrent page splits that
    2844              :          * take place on the parent level.  Its approach is a near-exhaustive
    2845              :          * linear search.  This also gives it a surprisingly good chance of
    2846              :          * recovering in the event of a buggy or inconsistent opclass.  But we
    2847              :          * don't rely on that here.
    2848              :          */
    2849            0 :         ereport(LOG,
    2850              :                 (errcode(ERRCODE_INDEX_CORRUPTED),
    2851              :                  errmsg_internal("failed to re-find parent key in index \"%s\" for deletion target page %u",
    2852              :                                  RelationGetRelationName(rel), child)));
    2853              :         Assert(false);
    2854            0 :         return false;
    2855              :     }
    2856              : 
    2857         2960 :     parent = stack->bts_blkno;
    2858         2960 :     parentoffset = stack->bts_offset;
    2859              : 
    2860         2960 :     page = BufferGetPage(pbuf);
    2861         2960 :     opaque = BTPageGetOpaque(page);
    2862         2960 :     maxoff = PageGetMaxOffsetNumber(page);
    2863         2960 :     leftsibparent = opaque->btpo_prev;
    2864              : 
    2865              :     /*
    2866              :      * _bt_getstackbuf() completes page splits on returned parent buffer when
    2867              :      * required.
    2868              :      *
    2869              :      * In general it's a bad idea for VACUUM to use up more disk space, which
    2870              :      * is why page deletion does not finish incomplete page splits most of the
    2871              :      * time.  We allow this limited exception because the risk is much lower,
    2872              :      * and the potential downside of not proceeding is much higher:  A single
    2873              :      * internal page with the INCOMPLETE_SPLIT flag set might otherwise
    2874              :      * prevent us from deleting hundreds of empty leaf pages from one level
    2875              :      * down.
    2876              :      */
    2877              :     Assert(!P_INCOMPLETE_SPLIT(opaque));
    2878              : 
    2879         2960 :     if (parentoffset < maxoff)
    2880              :     {
    2881              :         /*
    2882              :          * Child is not the rightmost child in parent, so it's safe to delete
    2883              :          * the subtree whose root/topparent is child page
    2884              :          */
    2885         2871 :         *subtreeparent = pbuf;
    2886         2871 :         *poffset = parentoffset;
    2887         2871 :         return true;
    2888              :     }
    2889              : 
    2890              :     /*
    2891              :      * Child is the rightmost child of parent.
    2892              :      *
    2893              :      * Since it's the rightmost child of parent, deleting the child (or
    2894              :      * deleting the subtree whose root/topparent is the child page) is only
    2895              :      * safe when it's also possible to delete the parent.
    2896              :      */
    2897              :     Assert(parentoffset == maxoff);
    2898           89 :     if (parentoffset != P_FIRSTDATAKEY(opaque) || P_RIGHTMOST(opaque))
    2899              :     {
    2900              :         /*
    2901              :          * Child isn't parent's only child, or parent is rightmost on its
    2902              :          * entire level.  Definitely cannot delete any pages.
    2903              :          */
    2904            1 :         _bt_relbuf(rel, pbuf);
    2905            1 :         return false;
    2906              :     }
    2907              : 
    2908              :     /*
    2909              :      * Now make sure that the parent deletion is itself safe by examining the
    2910              :      * child's grandparent page.  Recurse, passing the parent page as the
    2911              :      * child page (child's grandparent is the parent on the next level up). If
    2912              :      * parent deletion is unsafe, then child deletion must also be unsafe (in
    2913              :      * which case caller cannot delete any pages at all).
    2914              :      */
    2915           88 :     *topparent = parent;
    2916           88 :     *topparentrightsib = opaque->btpo_next;
    2917              : 
    2918              :     /*
    2919              :      * Release lock on parent before recursing.
    2920              :      *
    2921              :      * It's OK to release page locks on parent before recursive call locks
    2922              :      * grandparent.  An internal page can only acquire an entry if the child
    2923              :      * is split, but that cannot happen as long as we still hold a lock on the
    2924              :      * leafbuf page.
    2925              :      */
    2926           88 :     _bt_relbuf(rel, pbuf);
    2927              : 
    2928              :     /*
    2929              :      * Before recursing, check that the left sibling of parent (if any) is not
    2930              :      * marked with INCOMPLETE_SPLIT flag first (must do so after we drop the
    2931              :      * parent lock).
    2932              :      *
    2933              :      * Note: We deliberately avoid completing incomplete splits here.
    2934              :      */
    2935           88 :     if (_bt_leftsib_splitflag(rel, leftsibparent, parent))
    2936            0 :         return false;
    2937              : 
    2938              :     /* Recurse to examine child page's grandparent page */
    2939           88 :     return _bt_lock_subtree_parent(rel, heaprel, parent, stack->bts_parent,
    2940              :                                    subtreeparent, poffset,
    2941              :                                    topparent, topparentrightsib);
    2942              : }
    2943              : 
    2944              : /*
    2945              :  * Initialize local memory state used by VACUUM for _bt_pendingfsm_finalize
    2946              :  * optimization.
    2947              :  *
    2948              :  * Called at the start of a btvacuumscan().  Caller's cleanuponly argument
    2949              :  * indicates if ongoing VACUUM has not (and will not) call btbulkdelete().
    2950              :  *
    2951              :  * We expect to allocate memory inside VACUUM's top-level memory context here.
    2952              :  * The working buffer is subject to a limit based on work_mem.  Our strategy
    2953              :  * when the array can no longer grow within the bounds of that limit is to
    2954              :  * stop saving additional newly deleted pages, while proceeding as usual with
    2955              :  * the pages that we can fit.
    2956              :  */
    2957              : void
    2958         1538 : _bt_pendingfsm_init(Relation rel, BTVacState *vstate, bool cleanuponly)
    2959              : {
    2960              :     Size        maxbufsize;
    2961              : 
    2962              :     /*
    2963              :      * Don't bother with optimization in cleanup-only case -- we don't expect
    2964              :      * any newly deleted pages.  Besides, cleanup-only calls to btvacuumscan()
    2965              :      * can only take place because this optimization didn't work out during
    2966              :      * the last VACUUM.
    2967              :      */
    2968         1538 :     if (cleanuponly)
    2969            6 :         return;
    2970              : 
    2971              :     /*
    2972              :      * Cap maximum size of array so that we always respect work_mem.  Avoid
    2973              :      * int overflow here.
    2974              :      */
    2975         1532 :     vstate->bufsize = 256;
    2976         1532 :     maxbufsize = (work_mem * (Size) 1024) / sizeof(BTPendingFSM);
    2977         1532 :     maxbufsize = Min(maxbufsize, MaxAllocSize / sizeof(BTPendingFSM));
    2978              :     /* BTVacState.maxbufsize has type int */
    2979         1532 :     maxbufsize = Min(maxbufsize, INT_MAX);
    2980              :     /* Stay sane with small work_mem */
    2981         1532 :     maxbufsize = Max(maxbufsize, vstate->bufsize);
    2982         1532 :     vstate->maxbufsize = (int) maxbufsize;
    2983              : 
    2984              :     /* Allocate buffer, indicate that there are currently 0 pending pages */
    2985         1532 :     vstate->pendingpages = palloc_array(BTPendingFSM, vstate->bufsize);
    2986         1532 :     vstate->npendingpages = 0;
    2987              : }
    2988              : 
    2989              : /*
    2990              :  * Place any newly deleted pages (i.e. pages that _bt_pagedel() deleted during
    2991              :  * the ongoing VACUUM operation) into the free space map -- though only when
    2992              :  * it is actually safe to do so by now.
    2993              :  *
    2994              :  * Called at the end of a btvacuumscan(), just before free space map vacuuming
    2995              :  * takes place.
    2996              :  *
    2997              :  * Frees memory allocated by _bt_pendingfsm_init(), if any.
    2998              :  */
    2999              : void
    3000         1538 : _bt_pendingfsm_finalize(Relation rel, BTVacState *vstate)
    3001              : {
    3002         1538 :     IndexBulkDeleteResult *stats = vstate->stats;
    3003         1538 :     Relation    heaprel = vstate->info->heaprel;
    3004              : 
    3005              :     Assert(stats->pages_newly_deleted >= vstate->npendingpages);
    3006              :     Assert(heaprel != NULL);
    3007              : 
    3008         1538 :     if (vstate->npendingpages == 0)
    3009              :     {
    3010              :         /* Just free memory when nothing to do */
    3011         1462 :         if (vstate->pendingpages)
    3012         1456 :             pfree(vstate->pendingpages);
    3013              : 
    3014         1462 :         return;
    3015              :     }
    3016              : 
    3017              : #ifdef DEBUG_BTREE_PENDING_FSM
    3018              : 
    3019              :     /*
    3020              :      * Debugging aid: Sleep for 5 seconds to greatly increase the chances of
    3021              :      * placing pending pages in the FSM.  Note that the optimization will
    3022              :      * never be effective without some other backend concurrently consuming an
    3023              :      * XID.
    3024              :      */
    3025              :     pg_usleep(5000000L);
    3026              : #endif
    3027              : 
    3028              :     /*
    3029              :      * Recompute VACUUM XID boundaries.
    3030              :      *
    3031              :      * We don't actually care about the oldest non-removable XID.  Computing
    3032              :      * the oldest such XID has a useful side-effect that we rely on: it
    3033              :      * forcibly updates the XID horizon state for this backend.  This step is
    3034              :      * essential; GlobalVisCheckRemovableFullXid() will not reliably recognize
    3035              :      * that it is now safe to recycle newly deleted pages without this step.
    3036              :      */
    3037           76 :     GetOldestNonRemovableTransactionId(heaprel);
    3038              : 
    3039           80 :     for (int i = 0; i < vstate->npendingpages; i++)
    3040              :     {
    3041           80 :         BlockNumber target = vstate->pendingpages[i].target;
    3042           80 :         FullTransactionId safexid = vstate->pendingpages[i].safexid;
    3043              : 
    3044              :         /*
    3045              :          * Do the equivalent of checking BTPageIsRecyclable(), but without
    3046              :          * accessing the page again a second time.
    3047              :          *
    3048              :          * Give up on finding the first non-recyclable page -- all later pages
    3049              :          * must be non-recyclable too, since _bt_pendingfsm_add() adds pages
    3050              :          * to the array in safexid order.
    3051              :          */
    3052           80 :         if (!GlobalVisCheckRemovableFullXid(heaprel, safexid))
    3053           76 :             break;
    3054              : 
    3055            4 :         RecordFreeIndexPage(rel, target);
    3056            4 :         stats->pages_free++;
    3057              :     }
    3058              : 
    3059           76 :     pfree(vstate->pendingpages);
    3060              : }
    3061              : 
    3062              : /*
    3063              :  * Maintain array of pages that were deleted during current btvacuumscan()
    3064              :  * call, for use in _bt_pendingfsm_finalize()
    3065              :  */
    3066              : static void
    3067         2957 : _bt_pendingfsm_add(BTVacState *vstate,
    3068              :                    BlockNumber target,
    3069              :                    FullTransactionId safexid)
    3070              : {
    3071              :     Assert(vstate->npendingpages <= vstate->bufsize);
    3072              :     Assert(vstate->bufsize <= vstate->maxbufsize);
    3073              : 
    3074              : #ifdef USE_ASSERT_CHECKING
    3075              : 
    3076              :     /*
    3077              :      * Verify an assumption made by _bt_pendingfsm_finalize(): pages from the
    3078              :      * array will always be in safexid order (since that is the order that we
    3079              :      * save them in here)
    3080              :      */
    3081              :     if (vstate->npendingpages > 0)
    3082              :     {
    3083              :         FullTransactionId lastsafexid =
    3084              :             vstate->pendingpages[vstate->npendingpages - 1].safexid;
    3085              : 
    3086              :         Assert(FullTransactionIdFollowsOrEquals(safexid, lastsafexid));
    3087              :     }
    3088              : #endif
    3089              : 
    3090              :     /*
    3091              :      * If temp buffer reaches maxbufsize/work_mem capacity then we discard
    3092              :      * information about this page.
    3093              :      *
    3094              :      * Note that this also covers the case where we opted to not use the
    3095              :      * optimization in _bt_pendingfsm_init().
    3096              :      */
    3097         2957 :     if (vstate->npendingpages == vstate->maxbufsize)
    3098            0 :         return;
    3099              : 
    3100              :     /* Consider enlarging buffer */
    3101         2957 :     if (vstate->npendingpages == vstate->bufsize)
    3102              :     {
    3103            4 :         int         newbufsize = vstate->bufsize * 2;
    3104              : 
    3105              :         /* Respect work_mem */
    3106            4 :         if (newbufsize > vstate->maxbufsize)
    3107            0 :             newbufsize = vstate->maxbufsize;
    3108              : 
    3109            4 :         vstate->bufsize = newbufsize;
    3110            4 :         vstate->pendingpages =
    3111            4 :             repalloc(vstate->pendingpages,
    3112            4 :                      sizeof(BTPendingFSM) * vstate->bufsize);
    3113              :     }
    3114              : 
    3115              :     /* Save metadata for newly deleted page */
    3116         2957 :     vstate->pendingpages[vstate->npendingpages].target = target;
    3117         2957 :     vstate->pendingpages[vstate->npendingpages].safexid = safexid;
    3118         2957 :     vstate->npendingpages++;
    3119              : }
        

Generated by: LCOV version 2.0-1