LCOV - code coverage report
Current view: top level - src/backend/access/hash - hash_xlog.c (source / functions) Hit Total Coverage
Test: PostgreSQL 13devel Lines: 0 445 0.0 %
Date: 2019-11-15 23:07:02 Functions: 0 15 0.0 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * hash_xlog.c
       4             :  *    WAL replay logic for hash index.
       5             :  *
       6             :  *
       7             :  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
       8             :  * Portions Copyright (c) 1994, Regents of the University of California
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/access/hash/hash_xlog.c
      12             :  *
      13             :  *-------------------------------------------------------------------------
      14             :  */
      15             : #include "postgres.h"
      16             : 
      17             : #include "access/bufmask.h"
      18             : #include "access/hash.h"
      19             : #include "access/hash_xlog.h"
      20             : #include "access/transam.h"
      21             : #include "access/xlog.h"
      22             : #include "access/xlogutils.h"
      23             : #include "miscadmin.h"
      24             : #include "storage/procarray.h"
      25             : 
       26             : /*
       27             :  * replay a hash index meta page
                      :  *
                      :  * Redo for the init-meta-page record: rebuild the index metapage from
                      :  * the parameters carried in the WAL record (block ref 0 = metapage).
       28             :  */
       29             : static void
       30           0 : hash_xlog_init_meta_page(XLogReaderState *record)
       31             : {
       32           0 :     XLogRecPtr  lsn = record->EndRecPtr;
       33             :     Page        page;
       34             :     Buffer      metabuf;
       35             :     ForkNumber  forknum;
       36             : 
       37           0 :     xl_hash_init_meta_page *xlrec = (xl_hash_init_meta_page *) XLogRecGetData(record);
       38             : 
       39             :     /* create the index's metapage and initialize it from the record */
       40           0 :     metabuf = XLogInitBufferForRedo(record, 0);
       41             :     Assert(BufferIsValid(metabuf));
       42           0 :     _hash_init_metabuffer(metabuf, xlrec->num_tuples, xlrec->procid,
       43           0 :                           xlrec->ffactor, true);
       44           0 :     page = (Page) BufferGetPage(metabuf);
                      :     /* stamp the rebuilt page with this record's LSN, then dirty it */
       45           0 :     PageSetLSN(page, lsn);
       46           0 :     MarkBufferDirty(metabuf);
       47             : 
       48             :     /*
       49             :      * Force the on-disk state of init forks to always be in sync with the
       50             :      * state in shared buffers.  See XLogReadBufferForRedoExtended.  We need
       51             :      * special handling for init forks as create index operations don't log a
       52             :      * full page image of the metapage.
       53             :      */
       54           0 :     XLogRecGetBlockTag(record, 0, NULL, &forknum, NULL);
       55           0 :     if (forknum == INIT_FORKNUM)
       56           0 :         FlushOneBuffer(metabuf);
       57             : 
       58             :     /* all done */
       59           0 :     UnlockReleaseBuffer(metabuf);
       60           0 : }
      61             : 
       62             : /*
       63             :  * replay a hash index bitmap page
                      :  *
                      :  * Block ref 0 is the new (first) bitmap page; block ref 1 is the
                      :  * metapage, whose bitmap list must record the new page.
       64             :  */
       65             : static void
       66           0 : hash_xlog_init_bitmap_page(XLogReaderState *record)
       67             : {
       68           0 :     XLogRecPtr  lsn = record->EndRecPtr;
       69             :     Buffer      bitmapbuf;
       70             :     Buffer      metabuf;
       71             :     Page        page;
       72             :     HashMetaPage metap;
       73             :     uint32      num_buckets;
       74             :     ForkNumber  forknum;
       75             : 
       76           0 :     xl_hash_init_bitmap_page *xlrec = (xl_hash_init_bitmap_page *) XLogRecGetData(record);
       77             : 
       78             :     /*
       79             :      * Initialize bitmap page
       80             :      */
       81           0 :     bitmapbuf = XLogInitBufferForRedo(record, 0);
       82           0 :     _hash_initbitmapbuffer(bitmapbuf, xlrec->bmsize, true);
       83           0 :     PageSetLSN(BufferGetPage(bitmapbuf), lsn);
       84           0 :     MarkBufferDirty(bitmapbuf);
       85             : 
       86             :     /*
       87             :      * Force the on-disk state of init forks to always be in sync with the
       88             :      * state in shared buffers.  See XLogReadBufferForRedoExtended.  We need
       89             :      * special handling for init forks as create index operations don't log a
       90             :      * full page image of the metapage.
       91             :      */
       92           0 :     XLogRecGetBlockTag(record, 0, NULL, &forknum, NULL);
       93           0 :     if (forknum == INIT_FORKNUM)
       94           0 :         FlushOneBuffer(bitmapbuf);
       95           0 :     UnlockReleaseBuffer(bitmapbuf);
       96             : 
       97             :     /* add the new bitmap page to the metapage's list of bitmaps */
       98           0 :     if (XLogReadBufferForRedo(record, 1, &metabuf) == BLK_NEEDS_REDO)
       99             :     {
      100             :         /*
      101             :          * Note: in normal operation, we'd update the metapage while still
      102             :          * holding lock on the bitmap page.  But during replay it's not
      103             :          * necessary to hold that lock, since nobody can see it yet; the
      104             :          * creating transaction hasn't yet committed.
      105             :          */
      106           0 :         page = BufferGetPage(metabuf);
      107           0 :         metap = HashPageGetMeta(page);
      108             : 
                      :         /*
                      :          * The bitmap page sits right after the metapage (block 0) and the
                      :          * initial bucket pages, hence block number num_buckets + 1.
                      :          */
      109           0 :         num_buckets = metap->hashm_maxbucket + 1;
      110           0 :         metap->hashm_mapp[metap->hashm_nmaps] = num_buckets + 1;
      111           0 :         metap->hashm_nmaps++;
      112             : 
      113           0 :         PageSetLSN(page, lsn);
      114           0 :         MarkBufferDirty(metabuf);
      115             : 
      116           0 :         XLogRecGetBlockTag(record, 1, NULL, &forknum, NULL);
      117           0 :         if (forknum == INIT_FORKNUM)
      118           0 :             FlushOneBuffer(metabuf);
      119             :     }
      120           0 :     if (BufferIsValid(metabuf))
      121           0 :         UnlockReleaseBuffer(metabuf);
      122           0 : }
     123             : 
      124             : /*
      125             :  * replay a hash index insert without split
                      :  *
                      :  * Block ref 0 is the bucket/overflow page receiving the tuple; block
                      :  * ref 1 is the metapage (tuple count update).
      126             :  */
      127             : static void
      128           0 : hash_xlog_insert(XLogReaderState *record)
      129             : {
      130             :     HashMetaPage metap;
      131           0 :     XLogRecPtr  lsn = record->EndRecPtr;
      132           0 :     xl_hash_insert *xlrec = (xl_hash_insert *) XLogRecGetData(record);
      133             :     Buffer      buffer;
      134             :     Page        page;
      135             : 
      136           0 :     if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
      137             :     {
      138             :         Size        datalen;
      139           0 :         char       *datapos = XLogRecGetBlockData(record, 0, &datalen);
      140             : 
      141           0 :         page = BufferGetPage(buffer);
      142             : 
                      :         /* re-insert the tuple at the offset recorded at do-time */
      143           0 :         if (PageAddItem(page, (Item) datapos, datalen, xlrec->offnum,
      144             :                         false, false) == InvalidOffsetNumber)
      145           0 :             elog(PANIC, "hash_xlog_insert: failed to add item");
      146             : 
      147           0 :         PageSetLSN(page, lsn);
      148           0 :         MarkBufferDirty(buffer);
      149             :     }
      150           0 :     if (BufferIsValid(buffer))
      151           0 :         UnlockReleaseBuffer(buffer);
      152             : 
      153           0 :     if (XLogReadBufferForRedo(record, 1, &buffer) == BLK_NEEDS_REDO)
      154             :     {
      155             :         /*
      156             :          * Note: in normal operation, we'd update the metapage while still
      157             :          * holding lock on the page we inserted into.  But during replay it's
      158             :          * not necessary to hold that lock, since no other index updates can
      159             :          * be happening concurrently.
      160             :          */
      161           0 :         page = BufferGetPage(buffer);
      162           0 :         metap = HashPageGetMeta(page);
      163           0 :         metap->hashm_ntuples += 1;
      164             : 
      165           0 :         PageSetLSN(page, lsn);
      166           0 :         MarkBufferDirty(buffer);
      167             :     }
      168           0 :     if (BufferIsValid(buffer))
      169           0 :         UnlockReleaseBuffer(buffer);
      170           0 : }
     171             : 
      172             : /*
      173             :  * replay addition of overflow page for hash index
                      :  *
                      :  * Block refs: 0 = new overflow page, 1 = previous page in the bucket
                      :  * chain, 2 = existing bitmap page (optional), 3 = new bitmap page
                      :  * (optional), 4 = metapage.
      174             :  */
      175             : static void
      176           0 : hash_xlog_add_ovfl_page(XLogReaderState *record)
      177             : {
      178           0 :     XLogRecPtr  lsn = record->EndRecPtr;
      179           0 :     xl_hash_add_ovfl_page *xlrec = (xl_hash_add_ovfl_page *) XLogRecGetData(record);
      180             :     Buffer      leftbuf;
      181             :     Buffer      ovflbuf;
      182             :     Buffer      metabuf;
      183             :     BlockNumber leftblk;
      184             :     BlockNumber rightblk;
      185           0 :     BlockNumber newmapblk = InvalidBlockNumber;
      186             :     Page        ovflpage;
      187             :     HashPageOpaque ovflopaque;
      188             :     uint32     *num_bucket;
      189             :     char       *data;
      190             :     Size        datalen PG_USED_FOR_ASSERTS_ONLY;
      191           0 :     bool        new_bmpage = false;
      192             : 
      193           0 :     XLogRecGetBlockTag(record, 0, NULL, NULL, &rightblk);
      194           0 :     XLogRecGetBlockTag(record, 1, NULL, NULL, &leftblk);
      195             : 
                      :     /* initialize the new overflow page from scratch */
      196           0 :     ovflbuf = XLogInitBufferForRedo(record, 0);
      197             :     Assert(BufferIsValid(ovflbuf));
      198             : 
                      :     /* block 0's payload is the bucket number the page belongs to */
      199           0 :     data = XLogRecGetBlockData(record, 0, &datalen);
      200           0 :     num_bucket = (uint32 *) data;
      201             :     Assert(datalen == sizeof(uint32));
      202           0 :     _hash_initbuf(ovflbuf, InvalidBlockNumber, *num_bucket, LH_OVERFLOW_PAGE,
      203             :                   true);
      204             :     /* update backlink */
      205           0 :     ovflpage = BufferGetPage(ovflbuf);
      206           0 :     ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
      207           0 :     ovflopaque->hasho_prevblkno = leftblk;
      208             : 
      209           0 :     PageSetLSN(ovflpage, lsn);
      210           0 :     MarkBufferDirty(ovflbuf);
      211             : 
                      :     /* link the previous page in the chain forward to the new page */
      212           0 :     if (XLogReadBufferForRedo(record, 1, &leftbuf) == BLK_NEEDS_REDO)
      213             :     {
      214             :         Page        leftpage;
      215             :         HashPageOpaque leftopaque;
      216             : 
      217           0 :         leftpage = BufferGetPage(leftbuf);
      218           0 :         leftopaque = (HashPageOpaque) PageGetSpecialPointer(leftpage);
      219           0 :         leftopaque->hasho_nextblkno = rightblk;
      220             : 
      221           0 :         PageSetLSN(leftpage, lsn);
      222           0 :         MarkBufferDirty(leftbuf);
      223             :     }
      224             : 
      225           0 :     if (BufferIsValid(leftbuf))
      226           0 :         UnlockReleaseBuffer(leftbuf);
      227           0 :     UnlockReleaseBuffer(ovflbuf);
      228             : 
      229             :     /*
      230             :      * Note: in normal operation, we'd update the bitmap and meta page while
      231             :      * still holding lock on the overflow pages.  But during replay it's not
      232             :      * necessary to hold those locks, since no other index updates can be
      233             :      * happening concurrently.
      234             :      */
      235           0 :     if (XLogRecHasBlockRef(record, 2))
      236             :     {
      237             :         Buffer      mapbuffer;
      238             : 
                      :         /* mark the recycled overflow page as in-use in its bitmap page */
      239           0 :         if (XLogReadBufferForRedo(record, 2, &mapbuffer) == BLK_NEEDS_REDO)
      240             :         {
      241           0 :             Page        mappage = (Page) BufferGetPage(mapbuffer);
      242           0 :             uint32     *freep = NULL;
      243             :             char       *data;
      244             :             uint32     *bitmap_page_bit;
      245             : 
      246           0 :             freep = HashPageGetBitmap(mappage);
      247             : 
      248           0 :             data = XLogRecGetBlockData(record, 2, &datalen);
      249           0 :             bitmap_page_bit = (uint32 *) data;
      250             : 
      251           0 :             SETBIT(freep, *bitmap_page_bit);
      252             : 
      253           0 :             PageSetLSN(mappage, lsn);
      254           0 :             MarkBufferDirty(mapbuffer);
      255             :         }
      256           0 :         if (BufferIsValid(mapbuffer))
      257           0 :             UnlockReleaseBuffer(mapbuffer);
      258             :     }
      259             : 
                      :     /* if the do-time operation allocated a new bitmap page, recreate it */
      260           0 :     if (XLogRecHasBlockRef(record, 3))
      261             :     {
      262             :         Buffer      newmapbuf;
      263             : 
      264           0 :         newmapbuf = XLogInitBufferForRedo(record, 3);
      265             : 
      266           0 :         _hash_initbitmapbuffer(newmapbuf, xlrec->bmsize, true);
      267             : 
                      :         /* remember its block number for the metapage update below */
      268           0 :         new_bmpage = true;
      269           0 :         newmapblk = BufferGetBlockNumber(newmapbuf);
      270             : 
      271           0 :         MarkBufferDirty(newmapbuf);
      272           0 :         PageSetLSN(BufferGetPage(newmapbuf), lsn);
      273             : 
      274           0 :         UnlockReleaseBuffer(newmapbuf);
      275             :     }
      276             : 
      277           0 :     if (XLogReadBufferForRedo(record, 4, &metabuf) == BLK_NEEDS_REDO)
      278             :     {
      279             :         HashMetaPage metap;
      280             :         Page        page;
      281             :         uint32     *firstfree_ovflpage;
      282             : 
                      :         /* block 4's payload is the new first-free overflow page hint */
      283           0 :         data = XLogRecGetBlockData(record, 4, &datalen);
      284           0 :         firstfree_ovflpage = (uint32 *) data;
      285             : 
      286           0 :         page = BufferGetPage(metabuf);
      287           0 :         metap = HashPageGetMeta(page);
      288           0 :         metap->hashm_firstfree = *firstfree_ovflpage;
      289             : 
                      :         /*
                      :          * If the overflow page was newly allocated (not taken from the free
                      :          * bitmap), bump the spares count; a new bitmap page consumes one
                      :          * more spare and must be appended to the metapage's bitmap list.
                      :          */
      290           0 :         if (!xlrec->bmpage_found)
      291             :         {
      292           0 :             metap->hashm_spares[metap->hashm_ovflpoint]++;
      293             : 
      294           0 :             if (new_bmpage)
      295             :             {
      296             :                 Assert(BlockNumberIsValid(newmapblk));
      297             : 
      298           0 :                 metap->hashm_mapp[metap->hashm_nmaps] = newmapblk;
      299           0 :                 metap->hashm_nmaps++;
      300           0 :                 metap->hashm_spares[metap->hashm_ovflpoint]++;
      301             :             }
      302             :         }
      303             : 
      304           0 :         PageSetLSN(page, lsn);
      305           0 :         MarkBufferDirty(metabuf);
      306             :     }
      307           0 :     if (BufferIsValid(metabuf))
      308           0 :         UnlockReleaseBuffer(metabuf);
      309           0 : }
     310             : 
      311             : /*
      312             :  * replay allocation of page for split operation
                      :  *
                      :  * Block refs: 0 = old bucket's primary page, 1 = new bucket's primary
                      :  * page (freshly initialized), 2 = metapage.
      313             :  */
      314             : static void
      315           0 : hash_xlog_split_allocate_page(XLogReaderState *record)
      316             : {
      317           0 :     XLogRecPtr  lsn = record->EndRecPtr;
      318           0 :     xl_hash_split_allocate_page *xlrec = (xl_hash_split_allocate_page *) XLogRecGetData(record);
      319             :     Buffer      oldbuf;
      320             :     Buffer      newbuf;
      321             :     Buffer      metabuf;
      322             :     Size        datalen PG_USED_FOR_ASSERTS_ONLY;
      323             :     char       *data;
      324             :     XLogRedoAction action;
      325             : 
      326             :     /*
      327             :      * To be consistent with normal operation, here we take cleanup locks on
      328             :      * both the old and new buckets even though there can't be any concurrent
      329             :      * inserts.
      330             :      */
      331             : 
      332             :     /* replay the record for old bucket */
      333           0 :     action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &oldbuf);
      334             : 
      335             :     /*
      336             :      * Note that we still update the page even if it was restored from a full
      337             :      * page image, because the special space is not included in the image.
      338             :      */
      339           0 :     if (action == BLK_NEEDS_REDO || action == BLK_RESTORED)
      340             :     {
      341             :         Page        oldpage;
      342             :         HashPageOpaque oldopaque;
      343             : 
      344           0 :         oldpage = BufferGetPage(oldbuf);
      345           0 :         oldopaque = (HashPageOpaque) PageGetSpecialPointer(oldpage);
      346             : 
                      :         /* mark the old bucket as being split, pointing at its new twin */
      347           0 :         oldopaque->hasho_flag = xlrec->old_bucket_flag;
      348           0 :         oldopaque->hasho_prevblkno = xlrec->new_bucket;
      349             : 
      350           0 :         PageSetLSN(oldpage, lsn);
      351           0 :         MarkBufferDirty(oldbuf);
      352             :     }
      353             : 
      354             :     /* replay the record for new bucket */
      355           0 :     newbuf = XLogInitBufferForRedo(record, 1);
      356           0 :     _hash_initbuf(newbuf, xlrec->new_bucket, xlrec->new_bucket,
      357           0 :                   xlrec->new_bucket_flag, true);
                      :     /* sanity check: a freshly-initialized page must be cleanup-lockable */
      358           0 :     if (!IsBufferCleanupOK(newbuf))
      359           0 :         elog(PANIC, "hash_xlog_split_allocate_page: failed to acquire cleanup lock");
      360           0 :     MarkBufferDirty(newbuf);
      361           0 :     PageSetLSN(BufferGetPage(newbuf), lsn);
      362             : 
      363             :     /*
      364             :      * We can release the lock on old bucket early as well but doing here to
      365             :      * consistent with normal operation.
      366             :      */
      367           0 :     if (BufferIsValid(oldbuf))
      368           0 :         UnlockReleaseBuffer(oldbuf);
      369           0 :     if (BufferIsValid(newbuf))
      370           0 :         UnlockReleaseBuffer(newbuf);
      371             : 
      372             :     /*
      373             :      * Note: in normal operation, we'd update the meta page while still
      374             :      * holding lock on the old and new bucket pages.  But during replay it's
      375             :      * not necessary to hold those locks, since no other bucket splits can be
      376             :      * happening concurrently.
      377             :      */
      378             : 
      379             :     /* replay the record for metapage changes */
      380           0 :     if (XLogReadBufferForRedo(record, 2, &metabuf) == BLK_NEEDS_REDO)
      381             :     {
      382             :         Page        page;
      383             :         HashMetaPage metap;
      384             : 
      385           0 :         page = BufferGetPage(metabuf);
      386           0 :         metap = HashPageGetMeta(page);
      387           0 :         metap->hashm_maxbucket = xlrec->new_bucket;
      388             : 
      389           0 :         data = XLogRecGetBlockData(record, 2, &datalen);
      390             : 
                      :         /* payload layout: [lowmask, highmask] then [ovflpoint, ovflpages],
                      :          * each group present only when its flag bit is set */
      391           0 :         if (xlrec->flags & XLH_SPLIT_META_UPDATE_MASKS)
      392             :         {
      393             :             uint32      lowmask;
      394             :             uint32     *highmask;
      395             : 
      396             :             /* extract low and high masks. */
      397           0 :             memcpy(&lowmask, data, sizeof(uint32));
      398           0 :             highmask = (uint32 *) ((char *) data + sizeof(uint32));
      399             : 
      400             :             /* update metapage */
      401           0 :             metap->hashm_lowmask = lowmask;
      402           0 :             metap->hashm_highmask = *highmask;
      403             : 
      404           0 :             data += sizeof(uint32) * 2;
      405             :         }
      406             : 
      407           0 :         if (xlrec->flags & XLH_SPLIT_META_UPDATE_SPLITPOINT)
      408             :         {
      409             :             uint32      ovflpoint;
      410             :             uint32     *ovflpages;
      411             : 
      412             :             /* extract information of overflow pages. */
      413           0 :             memcpy(&ovflpoint, data, sizeof(uint32));
      414           0 :             ovflpages = (uint32 *) ((char *) data + sizeof(uint32));
      415             : 
      416             :             /* update metapage */
      417           0 :             metap->hashm_spares[ovflpoint] = *ovflpages;
      418           0 :             metap->hashm_ovflpoint = ovflpoint;
      419             :         }
      420             : 
      421           0 :         MarkBufferDirty(metabuf);
      422           0 :         PageSetLSN(BufferGetPage(metabuf), lsn);
      423             :     }
      424             : 
      425           0 :     if (BufferIsValid(metabuf))
      426           0 :         UnlockReleaseBuffer(metabuf);
      427           0 : }
     428             : 
      429             : /*
      430             :  * replay of split operation
                      :  *
                      :  * The split record is expected to carry a full-page image of the page
                      :  * being split into; anything else indicates a corrupt/unexpected record.
      431             :  */
      432             : static void
      433           0 : hash_xlog_split_page(XLogReaderState *record)
      434             : {
      435             :     Buffer      buf;
      436             : 
      437           0 :     if (XLogReadBufferForRedo(record, 0, &buf) != BLK_RESTORED)
      438           0 :         elog(ERROR, "Hash split record did not contain a full-page image");
      439             : 
      440           0 :     UnlockReleaseBuffer(buf);
      441           0 : }
     442             : 
      443             : /*
      444             :  * replay completion of split operation
                      :  *
                      :  * Clears the split-in-progress state by installing the final bucket
                      :  * flags on both the old (block ref 0) and new (block ref 1) buckets.
      445             :  */
      446             : static void
      447           0 : hash_xlog_split_complete(XLogReaderState *record)
      448             : {
      449           0 :     XLogRecPtr  lsn = record->EndRecPtr;
      450           0 :     xl_hash_split_complete *xlrec = (xl_hash_split_complete *) XLogRecGetData(record);
      451             :     Buffer      oldbuf;
      452             :     Buffer      newbuf;
      453             :     XLogRedoAction action;
      454             : 
      455             :     /* replay the record for old bucket */
      456           0 :     action = XLogReadBufferForRedo(record, 0, &oldbuf);
      457             : 
      458             :     /*
      459             :      * Note that we still update the page even if it was restored from a full
      460             :      * page image, because the bucket flag is not included in the image.
      461             :      */
      462           0 :     if (action == BLK_NEEDS_REDO || action == BLK_RESTORED)
      463             :     {
      464             :         Page        oldpage;
      465             :         HashPageOpaque oldopaque;
      466             : 
      467           0 :         oldpage = BufferGetPage(oldbuf);
      468           0 :         oldopaque = (HashPageOpaque) PageGetSpecialPointer(oldpage);
      469             : 
      470           0 :         oldopaque->hasho_flag = xlrec->old_bucket_flag;
      471             : 
      472           0 :         PageSetLSN(oldpage, lsn);
      473           0 :         MarkBufferDirty(oldbuf);
      474             :     }
      475           0 :     if (BufferIsValid(oldbuf))
      476           0 :         UnlockReleaseBuffer(oldbuf);
      477             : 
      478             :     /* replay the record for new bucket */
      479           0 :     action = XLogReadBufferForRedo(record, 1, &newbuf);
      480             : 
      481             :     /*
      482             :      * Note that we still update the page even if it was restored from a full
      483             :      * page image, because the bucket flag is not included in the image.
      484             :      */
      485           0 :     if (action == BLK_NEEDS_REDO || action == BLK_RESTORED)
      486             :     {
      487             :         Page        newpage;
      488             :         HashPageOpaque nopaque;
      489             : 
      490           0 :         newpage = BufferGetPage(newbuf);
      491           0 :         nopaque = (HashPageOpaque) PageGetSpecialPointer(newpage);
      492             : 
      493           0 :         nopaque->hasho_flag = xlrec->new_bucket_flag;
      494             : 
      495           0 :         PageSetLSN(newpage, lsn);
      496           0 :         MarkBufferDirty(newbuf);
      497             :     }
      498           0 :     if (BufferIsValid(newbuf))
      499           0 :         UnlockReleaseBuffer(newbuf);
      500           0 : }
     501             : 
      502             : /*
      503             :  * replay move of page contents for squeeze operation of hash index
                      :  *
                      :  * Block refs: 0 = primary bucket page (only when distinct from the
                      :  * write page), 1 = page receiving the moved tuples, 2 = page the
                      :  * tuples are deleted from.
      504             :  */
      505             : static void
      506           0 : hash_xlog_move_page_contents(XLogReaderState *record)
      507             : {
      508           0 :     XLogRecPtr  lsn = record->EndRecPtr;
      509           0 :     xl_hash_move_page_contents *xldata = (xl_hash_move_page_contents *) XLogRecGetData(record);
      510           0 :     Buffer      bucketbuf = InvalidBuffer;
      511           0 :     Buffer      writebuf = InvalidBuffer;
      512           0 :     Buffer      deletebuf = InvalidBuffer;
      513             :     XLogRedoAction action;
      514             : 
      515             :     /*
      516             :      * Ensure we have a cleanup lock on primary bucket page before we start
      517             :      * with the actual replay operation.  This is to ensure that neither a
      518             :      * scan can start nor a scan can be already-in-progress during the replay
      519             :      * of this operation.  If we allow scans during this operation, then they
      520             :      * can miss some records or show the same record multiple times.
      521             :      */
      522           0 :     if (xldata->is_prim_bucket_same_wrt)
      523           0 :         action = XLogReadBufferForRedoExtended(record, 1, RBM_NORMAL, true, &writebuf);
      524             :     else
      525             :     {
      526             :         /*
      527             :          * we don't care for return value as the purpose of reading bucketbuf
      528             :          * is to ensure a cleanup lock on primary bucket page.
      529             :          */
      530           0 :         (void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf);
      531             : 
      532           0 :         action = XLogReadBufferForRedo(record, 1, &writebuf);
      533             :     }
      534             : 
      535             :     /* replay the record for adding entries in overflow buffer */
      536           0 :     if (action == BLK_NEEDS_REDO)
      537             :     {
      538             :         Page        writepage;
      539             :         char       *begin;
      540             :         char       *data;
      541             :         Size        datalen;
      542           0 :         uint16      ninserted = 0;
      543             : 
      544           0 :         data = begin = XLogRecGetBlockData(record, 1, &datalen);
      545             : 
      546           0 :         writepage = (Page) BufferGetPage(writebuf);
      547             : 
      548           0 :         if (xldata->ntups > 0)
      549             :         {
                      :             /*
                      :              * Payload layout: ntups target offsets, then the tuples
                      :              * themselves (each MAXALIGNed).
                      :              */
      550           0 :             OffsetNumber *towrite = (OffsetNumber *) data;
      551             : 
      552           0 :             data += sizeof(OffsetNumber) * xldata->ntups;
      553             : 
      554           0 :             while (data - begin < datalen)
      555             :             {
      556           0 :                 IndexTuple  itup = (IndexTuple) data;
      557             :                 Size        itemsz;
      558             :                 OffsetNumber l;
      559             : 
      560           0 :                 itemsz = IndexTupleSize(itup);
      561           0 :                 itemsz = MAXALIGN(itemsz);
      562             : 
      563           0 :                 data += itemsz;
      564             : 
      565           0 :                 l = PageAddItem(writepage, (Item) itup, itemsz, towrite[ninserted], false, false);
      566           0 :                 if (l == InvalidOffsetNumber)
      567           0 :                     elog(ERROR, "hash_xlog_move_page_contents: failed to add item to hash index page, size %d bytes",
      568             :                          (int) itemsz);
      569             : 
      570           0 :                 ninserted++;
      571             :             }
      572             :         }
      573             : 
      574             :         /*
      575             :          * number of tuples inserted must be same as requested in REDO record.
      576             :          */
      577             :         Assert(ninserted == xldata->ntups);
      578             : 
      579           0 :         PageSetLSN(writepage, lsn);
      580           0 :         MarkBufferDirty(writebuf);
      581             :     }
      582             : 
      583             :     /* replay the record for deleting entries from overflow buffer */
      584           0 :     if (XLogReadBufferForRedo(record, 2, &deletebuf) == BLK_NEEDS_REDO)
      585             :     {
      586             :         Page        page;
      587             :         char       *ptr;
      588             :         Size        len;
      589             : 
      590           0 :         ptr = XLogRecGetBlockData(record, 2, &len);
      591             : 
      592           0 :         page = (Page) BufferGetPage(deletebuf);
      593             : 
      594           0 :         if (len > 0)
      595             :         {
      596             :             OffsetNumber *unused;
      597             :             OffsetNumber *unend;
      598             : 
                      :             /* block 2's payload is just an array of offsets to delete */
      599           0 :             unused = (OffsetNumber *) ptr;
      600           0 :             unend = (OffsetNumber *) ((char *) ptr + len);
      601             : 
      602           0 :             if ((unend - unused) > 0)
      603           0 :                 PageIndexMultiDelete(page, unused, unend - unused);
      604             :         }
      605             : 
      606           0 :         PageSetLSN(page, lsn);
      607           0 :         MarkBufferDirty(deletebuf);
      608             :     }
      609             : 
      610             :     /*
      611             :      * Replay is complete, now we can release the buffers. We release locks at
      612             :      * end of replay operation to ensure that we hold lock on primary bucket
      613             :      * page till end of operation.  We can optimize by releasing the lock on
      614             :      * write buffer as soon as the operation for same is complete, if it is
      615             :      * not same as primary bucket page, but that doesn't seem to be worth
      616             :      * complicating the code.
      617             :      */
      618           0 :     if (BufferIsValid(deletebuf))
      619           0 :         UnlockReleaseBuffer(deletebuf);
      620             : 
      621           0 :     if (BufferIsValid(writebuf))
      622           0 :         UnlockReleaseBuffer(writebuf);
      623             : 
      624           0 :     if (BufferIsValid(bucketbuf))
      625           0 :         UnlockReleaseBuffer(bucketbuf);
      626           0 : }
     627             : 
/*
 * replay squeeze page operation of hash index
 *
 * A squeeze moves the tuples of the bucket chain's last overflow page into
 * earlier pages of the chain, then frees that overflow page, unlinks it
 * from the chain, and clears its bit in a bitmap page.  The record can
 * carry up to seven block references:
 *   0 - primary bucket page (present only when distinct from block 1)
 *   1 - the page receiving the moved tuples ("writebuf")
 *   2 - the overflow page being freed
 *   3 - page previous to the freed page (only when distinct from block 1)
 *   4 - page next to the freed page (optional)
 *   5 - bitmap page
 *   6 - meta page (optional)
 */
static void
hash_xlog_squeeze_page(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_hash_squeeze_page *xldata = (xl_hash_squeeze_page *) XLogRecGetData(record);
    Buffer      bucketbuf = InvalidBuffer;
    Buffer      writebuf;
    Buffer      ovflbuf;
    Buffer      prevbuf = InvalidBuffer;
    Buffer      mapbuf;
    XLogRedoAction action;

    /*
     * Ensure we have a cleanup lock on primary bucket page before we start
     * with the actual replay operation.  This is to ensure that neither a
     * scan can start nor a scan can be already-in-progress during the replay
     * of this operation.  If we allow scans during this operation, then they
     * can miss some records or show the same record multiple times.
     */
    if (xldata->is_prim_bucket_same_wrt)
        action = XLogReadBufferForRedoExtended(record, 1, RBM_NORMAL, true, &writebuf);
    else
    {
        /*
         * we don't care for return value as the purpose of reading bucketbuf
         * is to ensure a cleanup lock on primary bucket page.
         */
        (void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf);

        action = XLogReadBufferForRedo(record, 1, &writebuf);
    }

    /* replay the record for adding entries in overflow buffer */
    if (action == BLK_NEEDS_REDO)
    {
        Page        writepage;
        char       *begin;
        char       *data;
        Size        datalen;
        uint16      ninserted = 0;

        /*
         * Block-1 payload layout: an array of ntups target OffsetNumbers,
         * followed by the MAXALIGN'd index tuples themselves.
         */
        data = begin = XLogRecGetBlockData(record, 1, &datalen);

        writepage = (Page) BufferGetPage(writebuf);

        if (xldata->ntups > 0)
        {
            OffsetNumber *towrite = (OffsetNumber *) data;

            /* skip past the offset array to the first tuple */
            data += sizeof(OffsetNumber) * xldata->ntups;

            while (data - begin < datalen)
            {
                IndexTuple  itup = (IndexTuple) data;
                Size        itemsz;
                OffsetNumber l;

                itemsz = IndexTupleSize(itup);
                itemsz = MAXALIGN(itemsz);

                data += itemsz;

                /* re-insert the tuple at the offset logged at do-time */
                l = PageAddItem(writepage, (Item) itup, itemsz, towrite[ninserted], false, false);
                if (l == InvalidOffsetNumber)
                    elog(ERROR, "hash_xlog_squeeze_page: failed to add item to hash index page, size %d bytes",
                         (int) itemsz);

                ninserted++;
            }
        }

        /*
         * number of tuples inserted must be same as requested in REDO record.
         */
        Assert(ninserted == xldata->ntups);

        /*
         * if the page on which are adding tuples is a page previous to freed
         * overflow page, then update its nextblkno.
         */
        if (xldata->is_prev_bucket_same_wrt)
        {
            HashPageOpaque writeopaque = (HashPageOpaque) PageGetSpecialPointer(writepage);

            writeopaque->hasho_nextblkno = xldata->nextblkno;
        }

        PageSetLSN(writepage, lsn);
        MarkBufferDirty(writebuf);
    }

    /* replay the record for initializing overflow buffer */
    if (XLogReadBufferForRedo(record, 2, &ovflbuf) == BLK_NEEDS_REDO)
    {
        Page        ovflpage;
        HashPageOpaque ovflopaque;

        /* reinitialize the freed page as an unused page */
        ovflpage = BufferGetPage(ovflbuf);

        _hash_pageinit(ovflpage, BufferGetPageSize(ovflbuf));

        ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);

        ovflopaque->hasho_prevblkno = InvalidBlockNumber;
        ovflopaque->hasho_nextblkno = InvalidBlockNumber;
        ovflopaque->hasho_bucket = -1;
        ovflopaque->hasho_flag = LH_UNUSED_PAGE;
        ovflopaque->hasho_page_id = HASHO_PAGE_ID;

        PageSetLSN(ovflpage, lsn);
        MarkBufferDirty(ovflbuf);
    }
    if (BufferIsValid(ovflbuf))
        UnlockReleaseBuffer(ovflbuf);

    /* replay the record for page previous to the freed overflow page */
    if (!xldata->is_prev_bucket_same_wrt &&
        XLogReadBufferForRedo(record, 3, &prevbuf) == BLK_NEEDS_REDO)
    {
        Page        prevpage = BufferGetPage(prevbuf);
        HashPageOpaque prevopaque = (HashPageOpaque) PageGetSpecialPointer(prevpage);

        /* unlink the freed page by pointing its predecessor past it */
        prevopaque->hasho_nextblkno = xldata->nextblkno;

        PageSetLSN(prevpage, lsn);
        MarkBufferDirty(prevbuf);
    }
    if (BufferIsValid(prevbuf))
        UnlockReleaseBuffer(prevbuf);

    /* replay the record for page next to the freed overflow page */
    if (XLogRecHasBlockRef(record, 4))
    {
        Buffer      nextbuf;

        if (XLogReadBufferForRedo(record, 4, &nextbuf) == BLK_NEEDS_REDO)
        {
            Page        nextpage = BufferGetPage(nextbuf);
            HashPageOpaque nextopaque = (HashPageOpaque) PageGetSpecialPointer(nextpage);

            nextopaque->hasho_prevblkno = xldata->prevblkno;

            PageSetLSN(nextpage, lsn);
            MarkBufferDirty(nextbuf);
        }
        if (BufferIsValid(nextbuf))
            UnlockReleaseBuffer(nextbuf);
    }

    if (BufferIsValid(writebuf))
        UnlockReleaseBuffer(writebuf);

    if (BufferIsValid(bucketbuf))
        UnlockReleaseBuffer(bucketbuf);

    /*
     * Note: in normal operation, we'd update the bitmap and meta page while
     * still holding lock on the primary bucket page and overflow pages.  But
     * during replay it's not necessary to hold those locks, since no other
     * index updates can be happening concurrently.
     */
    /* replay the record for bitmap page */
    if (XLogReadBufferForRedo(record, 5, &mapbuf) == BLK_NEEDS_REDO)
    {
        Page        mappage = (Page) BufferGetPage(mapbuf);
        uint32     *freep = NULL;
        char       *data;
        uint32     *bitmap_page_bit;
        Size        datalen;

        freep = HashPageGetBitmap(mappage);

        /* block-5 payload is the single bit number to clear */
        data = XLogRecGetBlockData(record, 5, &datalen);
        bitmap_page_bit = (uint32 *) data;

        CLRBIT(freep, *bitmap_page_bit);

        PageSetLSN(mappage, lsn);
        MarkBufferDirty(mapbuf);
    }
    if (BufferIsValid(mapbuf))
        UnlockReleaseBuffer(mapbuf);

    /* replay the record for meta page */
    if (XLogRecHasBlockRef(record, 6))
    {
        Buffer      metabuf;

        if (XLogReadBufferForRedo(record, 6, &metabuf) == BLK_NEEDS_REDO)
        {
            HashMetaPage metap;
            Page        page;
            char       *data;
            uint32     *firstfree_ovflpage;
            Size        datalen;

            /* block-6 payload is the new first-free overflow page number */
            data = XLogRecGetBlockData(record, 6, &datalen);
            firstfree_ovflpage = (uint32 *) data;

            page = BufferGetPage(metabuf);
            metap = HashPageGetMeta(page);
            metap->hashm_firstfree = *firstfree_ovflpage;

            PageSetLSN(page, lsn);
            MarkBufferDirty(metabuf);
        }
        if (BufferIsValid(metabuf))
            UnlockReleaseBuffer(metabuf);
    }
}
     841             : 
/*
 * replay delete operation of hash index
 *
 * Block references: 0 is the primary bucket page (present only when it is
 * not the page being cleaned), 1 is the page whose tuples are removed.
 * The block-1 data payload, when present, is an array of OffsetNumbers
 * identifying the tuples to delete.
 */
static void
hash_xlog_delete(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_hash_delete *xldata = (xl_hash_delete *) XLogRecGetData(record);
    Buffer      bucketbuf = InvalidBuffer;
    Buffer      deletebuf;
    Page        page;
    XLogRedoAction action;

    /*
     * Ensure we have a cleanup lock on primary bucket page before we start
     * with the actual replay operation.  This is to ensure that neither a
     * scan can start nor a scan can be already-in-progress during the replay
     * of this operation.  If we allow scans during this operation, then they
     * can miss some records or show the same record multiple times.
     */
    if (xldata->is_primary_bucket_page)
        action = XLogReadBufferForRedoExtended(record, 1, RBM_NORMAL, true, &deletebuf);
    else
    {
        /*
         * we don't care for return value as the purpose of reading bucketbuf
         * is to ensure a cleanup lock on primary bucket page.
         */
        (void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf);

        action = XLogReadBufferForRedo(record, 1, &deletebuf);
    }

    /* replay the record for deleting entries in bucket page */
    if (action == BLK_NEEDS_REDO)
    {
        char       *ptr;
        Size        len;

        ptr = XLogRecGetBlockData(record, 1, &len);

        page = (Page) BufferGetPage(deletebuf);

        if (len > 0)
        {
            OffsetNumber *unused;
            OffsetNumber *unend;

            unused = (OffsetNumber *) ptr;
            unend = (OffsetNumber *) ((char *) ptr + len);

            if ((unend - unused) > 0)
                PageIndexMultiDelete(page, unused, unend - unused);
        }

        /*
         * Mark the page as not containing any LP_DEAD items only if
         * clear_dead_marking flag is set to true. See comments in
         * hashbucketcleanup() for details.
         */
        if (xldata->clear_dead_marking)
        {
            HashPageOpaque pageopaque;

            pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
            pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
        }

        PageSetLSN(page, lsn);
        MarkBufferDirty(deletebuf);
    }
    if (BufferIsValid(deletebuf))
        UnlockReleaseBuffer(deletebuf);

    /* primary bucket page lock is held until replay is complete */
    if (BufferIsValid(bucketbuf))
        UnlockReleaseBuffer(bucketbuf);
}
     919             : 
     920             : /*
     921             :  * replay split cleanup flag operation for primary bucket page.
     922             :  */
     923             : static void
     924           0 : hash_xlog_split_cleanup(XLogReaderState *record)
     925             : {
     926           0 :     XLogRecPtr  lsn = record->EndRecPtr;
     927             :     Buffer      buffer;
     928             :     Page        page;
     929             : 
     930           0 :     if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
     931             :     {
     932             :         HashPageOpaque bucket_opaque;
     933             : 
     934           0 :         page = (Page) BufferGetPage(buffer);
     935             : 
     936           0 :         bucket_opaque = (HashPageOpaque) PageGetSpecialPointer(page);
     937           0 :         bucket_opaque->hasho_flag &= ~LH_BUCKET_NEEDS_SPLIT_CLEANUP;
     938           0 :         PageSetLSN(page, lsn);
     939           0 :         MarkBufferDirty(buffer);
     940             :     }
     941           0 :     if (BufferIsValid(buffer))
     942           0 :         UnlockReleaseBuffer(buffer);
     943           0 : }
     944             : 
     945             : /*
     946             :  * replay for update meta page
     947             :  */
     948             : static void
     949           0 : hash_xlog_update_meta_page(XLogReaderState *record)
     950             : {
     951             :     HashMetaPage metap;
     952           0 :     XLogRecPtr  lsn = record->EndRecPtr;
     953           0 :     xl_hash_update_meta_page *xldata = (xl_hash_update_meta_page *) XLogRecGetData(record);
     954             :     Buffer      metabuf;
     955             :     Page        page;
     956             : 
     957           0 :     if (XLogReadBufferForRedo(record, 0, &metabuf) == BLK_NEEDS_REDO)
     958             :     {
     959           0 :         page = BufferGetPage(metabuf);
     960           0 :         metap = HashPageGetMeta(page);
     961             : 
     962           0 :         metap->hashm_ntuples = xldata->ntuples;
     963             : 
     964           0 :         PageSetLSN(page, lsn);
     965           0 :         MarkBufferDirty(metabuf);
     966             :     }
     967           0 :     if (BufferIsValid(metabuf))
     968           0 :         UnlockReleaseBuffer(metabuf);
     969           0 : }
     970             : 
/*
 * replay delete operation in hash index to remove
 * tuples marked as DEAD during index tuple insertion.
 *
 * Block references: 0 is the bucket/overflow page being cleaned (taken
 * with a cleanup lock), 1 is the meta page whose tuple count is adjusted.
 * The record's main data is an xl_hash_vacuum_one_page header optionally
 * followed by an array of ntuples OffsetNumbers to delete.
 */
static void
hash_xlog_vacuum_one_page(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_hash_vacuum_one_page *xldata;
    Buffer      buffer;
    Buffer      metabuf;
    Page        page;
    XLogRedoAction action;
    HashPageOpaque pageopaque;

    xldata = (xl_hash_vacuum_one_page *) XLogRecGetData(record);

    /*
     * If we have any conflict processing to do, it must happen before we
     * update the page.
     *
     * Hash index records that are marked as LP_DEAD and being removed during
     * hash index tuple insertion can conflict with standby queries. You might
     * think that vacuum records would conflict as well, but we've handled
     * that already.  XLOG_HEAP2_CLEANUP_INFO records provide the highest xid
     * cleaned by the vacuum of the heap and so we can resolve any conflicts
     * just once when that arrives.  After that we know that no conflicts
     * exist from individual hash index vacuum records on that index.
     */
    if (InHotStandby)
    {
        RelFileNode rnode;

        XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
        ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid, rnode);
    }

    /* cleanup lock: no scan may be in flight on this page during replay */
    action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &buffer);

    if (action == BLK_NEEDS_REDO)
    {
        page = (Page) BufferGetPage(buffer);

        /* offsets to delete, if any, follow the fixed-size record header */
        if (XLogRecGetDataLen(record) > SizeOfHashVacuumOnePage)
        {
            OffsetNumber *unused;

            unused = (OffsetNumber *) ((char *) xldata + SizeOfHashVacuumOnePage);

            PageIndexMultiDelete(page, unused, xldata->ntuples);
        }

        /*
         * Mark the page as not containing any LP_DEAD items. See comments in
         * _hash_vacuum_one_page() for details.
         */
        pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
        pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);

    /* replay the matching decrement of the meta page's tuple count */
    if (XLogReadBufferForRedo(record, 1, &metabuf) == BLK_NEEDS_REDO)
    {
        Page        metapage;
        HashMetaPage metap;

        metapage = BufferGetPage(metabuf);
        metap = HashPageGetMeta(metapage);

        metap->hashm_ntuples -= xldata->ntuples;

        PageSetLSN(metapage, lsn);
        MarkBufferDirty(metabuf);
    }
    if (BufferIsValid(metabuf))
        UnlockReleaseBuffer(metabuf);
}
    1052             : 
    1053             : void
    1054           0 : hash_redo(XLogReaderState *record)
    1055             : {
    1056           0 :     uint8       info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
    1057             : 
    1058           0 :     switch (info)
    1059             :     {
    1060             :         case XLOG_HASH_INIT_META_PAGE:
    1061           0 :             hash_xlog_init_meta_page(record);
    1062           0 :             break;
    1063             :         case XLOG_HASH_INIT_BITMAP_PAGE:
    1064           0 :             hash_xlog_init_bitmap_page(record);
    1065           0 :             break;
    1066             :         case XLOG_HASH_INSERT:
    1067           0 :             hash_xlog_insert(record);
    1068           0 :             break;
    1069             :         case XLOG_HASH_ADD_OVFL_PAGE:
    1070           0 :             hash_xlog_add_ovfl_page(record);
    1071           0 :             break;
    1072             :         case XLOG_HASH_SPLIT_ALLOCATE_PAGE:
    1073           0 :             hash_xlog_split_allocate_page(record);
    1074           0 :             break;
    1075             :         case XLOG_HASH_SPLIT_PAGE:
    1076           0 :             hash_xlog_split_page(record);
    1077           0 :             break;
    1078             :         case XLOG_HASH_SPLIT_COMPLETE:
    1079           0 :             hash_xlog_split_complete(record);
    1080           0 :             break;
    1081             :         case XLOG_HASH_MOVE_PAGE_CONTENTS:
    1082           0 :             hash_xlog_move_page_contents(record);
    1083           0 :             break;
    1084             :         case XLOG_HASH_SQUEEZE_PAGE:
    1085           0 :             hash_xlog_squeeze_page(record);
    1086           0 :             break;
    1087             :         case XLOG_HASH_DELETE:
    1088           0 :             hash_xlog_delete(record);
    1089           0 :             break;
    1090             :         case XLOG_HASH_SPLIT_CLEANUP:
    1091           0 :             hash_xlog_split_cleanup(record);
    1092           0 :             break;
    1093             :         case XLOG_HASH_UPDATE_META_PAGE:
    1094           0 :             hash_xlog_update_meta_page(record);
    1095           0 :             break;
    1096             :         case XLOG_HASH_VACUUM_ONE_PAGE:
    1097           0 :             hash_xlog_vacuum_one_page(record);
    1098           0 :             break;
    1099             :         default:
    1100           0 :             elog(PANIC, "hash_redo: unknown op code %u", info);
    1101             :     }
    1102           0 : }
    1103             : 
    1104             : /*
    1105             :  * Mask a hash page before performing consistency checks on it.
    1106             :  */
    1107             : void
    1108           0 : hash_mask(char *pagedata, BlockNumber blkno)
    1109             : {
    1110           0 :     Page        page = (Page) pagedata;
    1111             :     HashPageOpaque opaque;
    1112             :     int         pagetype;
    1113             : 
    1114           0 :     mask_page_lsn_and_checksum(page);
    1115             : 
    1116           0 :     mask_page_hint_bits(page);
    1117           0 :     mask_unused_space(page);
    1118             : 
    1119           0 :     opaque = (HashPageOpaque) PageGetSpecialPointer(page);
    1120             : 
    1121           0 :     pagetype = opaque->hasho_flag & LH_PAGE_TYPE;
    1122           0 :     if (pagetype == LH_UNUSED_PAGE)
    1123             :     {
    1124             :         /*
    1125             :          * Mask everything on a UNUSED page.
    1126             :          */
    1127           0 :         mask_page_content(page);
    1128             :     }
    1129           0 :     else if (pagetype == LH_BUCKET_PAGE ||
    1130             :              pagetype == LH_OVERFLOW_PAGE)
    1131             :     {
    1132             :         /*
    1133             :          * In hash bucket and overflow pages, it is possible to modify the
    1134             :          * LP_FLAGS without emitting any WAL record. Hence, mask the line
    1135             :          * pointer flags. See hashgettuple(), _hash_kill_items() for details.
    1136             :          */
    1137           0 :         mask_lp_flags(page);
    1138             :     }
    1139             : 
    1140             :     /*
    1141             :      * It is possible that the hint bit LH_PAGE_HAS_DEAD_TUPLES may remain
    1142             :      * unlogged. So, mask it. See _hash_kill_items() for details.
    1143             :      */
    1144           0 :     opaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
    1145           0 : }

Generated by: LCOV version 1.13