Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * gistxlog.c
4 : * WAL replay logic for GiST.
5 : *
6 : *
7 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
8 : * Portions Copyright (c) 1994, Regents of the University of California
9 : *
10 : * IDENTIFICATION
11 : * src/backend/access/gist/gistxlog.c
12 : *-------------------------------------------------------------------------
13 : */
14 : #include "postgres.h"
15 :
16 : #include "access/bufmask.h"
17 : #include "access/gist_private.h"
18 : #include "access/gistxlog.h"
19 : #include "access/transam.h"
20 : #include "access/xloginsert.h"
21 : #include "access/xlogutils.h"
22 : #include "storage/standby.h"
23 : #include "utils/memutils.h"
24 : #include "utils/rel.h"
25 :
26 : static MemoryContext opCtx; /* working memory for operations */
27 :
/*
 * Replay the clearing of F_FOLLOW_RIGHT flag on a child page.
 *
 * Even if the WAL record includes a full-page image, we have to update the
 * follow-right flag, because that change is not included in the full-page
 * image.  To be sure that the intermediate state with the wrong flag value is
 * not visible to concurrent Hot Standby queries, this function handles
 * restoring the full-page image as well as updating the flag.  (Note that
 * we never need to do anything else to the child page in the current WAL
 * action.)
 *
 * 'block_id' identifies which block reference in 'record' is the child page.
 */
static void
gistRedoClearFollowRight(XLogReaderState *record, uint8 block_id)
{
	XLogRecPtr	lsn = record->EndRecPtr;
	Buffer		buffer;
	Page		page;
	XLogRedoAction action;

	/*
	 * Note that we still update the page even if it was restored from a full
	 * page image, because the updated NSN is not included in the image.
	 */
	action = XLogReadBufferForRedo(record, block_id, &buffer);
	if (action == BLK_NEEDS_REDO || action == BLK_RESTORED)
	{
		page = BufferGetPage(buffer);

		GistPageSetNSN(page, lsn);
		GistClearFollowRight(page);

		PageSetLSN(page, lsn);
		MarkBufferDirty(buffer);
	}

	/* buffer can be invalid if the page no longer exists */
	if (BufferIsValid(buffer))
		UnlockReleaseBuffer(buffer);
}
65 :
/*
 * redo any page update (except page split)
 *
 * Block 0 is the target page; its data payload is (optionally) an array of
 * offsets to delete, followed by the new tuples to insert.  Block 1, if
 * present, is the left child page whose F_FOLLOW_RIGHT flag must be cleared.
 */
static void
gistRedoPageUpdateRecord(XLogReaderState *record)
{
	XLogRecPtr	lsn = record->EndRecPtr;
	gistxlogPageUpdate *xldata = (gistxlogPageUpdate *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;

	if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
	{
		char	   *begin;
		char	   *data;
		Size		datalen;
		int			ninserted PG_USED_FOR_ASSERTS_ONLY = 0;

		data = begin = XLogRecGetBlockData(record, 0, &datalen);

		page = BufferGetPage(buffer);

		if (xldata->ntodelete == 1 && xldata->ntoinsert == 1)
		{
			/*
			 * When replacing one tuple with one other tuple, we must use
			 * PageIndexTupleOverwrite for consistency with gistplacetopage.
			 */
			OffsetNumber offnum = *((OffsetNumber *) data);
			IndexTuple	itup;
			Size		itupsize;

			data += sizeof(OffsetNumber);
			itup = (IndexTuple) data;
			itupsize = IndexTupleSize(itup);
			if (!PageIndexTupleOverwrite(page, offnum, itup, itupsize))
				elog(ERROR, "failed to add item to GiST index page, size %zu bytes", itupsize);
			data += itupsize;
			/* should be nothing left after consuming 1 tuple */
			Assert(data - begin == datalen);
			/* update insertion count for assert check below */
			ninserted++;
		}
		else if (xldata->ntodelete > 0)
		{
			/* Otherwise, delete old tuples if any */
			OffsetNumber *todelete = (OffsetNumber *) data;

			data += sizeof(OffsetNumber) * xldata->ntodelete;

			PageIndexMultiDelete(page, todelete, xldata->ntodelete);
			if (GistPageIsLeaf(page))
				GistMarkTuplesDeleted(page);
		}

		/* Add new tuples if any */
		if (data - begin < datalen)
		{
			/* append after the current last item (or at start of empty page) */
			OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber :
				OffsetNumberNext(PageGetMaxOffsetNumber(page));

			while (data - begin < datalen)
			{
				IndexTuple	itup = (IndexTuple) data;
				Size		sz = IndexTupleSize(itup);
				OffsetNumber l;

				data += sz;

				l = PageAddItem(page, itup, sz, off, false, false);
				if (l == InvalidOffsetNumber)
					elog(ERROR, "failed to add item to GiST index page, size %zu bytes", sz);
				off++;
				ninserted++;
			}
		}

		/* Check that XLOG record contained expected number of tuples */
		Assert(ninserted == xldata->ntoinsert);

		PageSetLSN(page, lsn);
		MarkBufferDirty(buffer);
	}

	/*
	 * Fix follow-right data on left child page
	 *
	 * This must be done while still holding the lock on the target page. Note
	 * that even if the target page no longer exists, we still attempt to
	 * replay the change on the child page.
	 */
	if (XLogRecHasBlockRef(record, 1))
		gistRedoClearFollowRight(record, 1);

	if (BufferIsValid(buffer))
		UnlockReleaseBuffer(buffer);
}
163 :
164 :
/*
 * redo delete on gist index page to remove tuples marked as DEAD during index
 * tuple insertion
 *
 * The offsets to delete are carried in the record's main data, so they are
 * available for conflict resolution on a standby even when block 0 is
 * replayed from a full-page image.
 */
static void
gistRedoDeleteRecord(XLogReaderState *record)
{
	XLogRecPtr	lsn = record->EndRecPtr;
	gistxlogDelete *xldata = (gistxlogDelete *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;
	OffsetNumber *toDelete = xldata->offsets;

	/*
	 * If we have any conflict processing to do, it must happen before we
	 * update the page.
	 *
	 * GiST delete records can conflict with standby queries. You might think
	 * that vacuum records would conflict as well, but we've handled that
	 * already. XLOG_HEAP2_PRUNE_VACUUM_SCAN records provide the highest xid
	 * cleaned by the vacuum of the heap and so we can resolve any conflicts
	 * just once when that arrives. After that we know that no conflicts
	 * exist from individual gist vacuum records on that index.
	 */
	if (InHotStandby)
	{
		RelFileLocator rlocator;

		XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);

		ResolveRecoveryConflictWithSnapshot(xldata->snapshotConflictHorizon,
											xldata->isCatalogRel,
											rlocator);
	}

	if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
	{
		page = BufferGetPage(buffer);

		PageIndexMultiDelete(page, toDelete, xldata->ntodelete);

		/* the page is now clean of dead tuples */
		GistClearPageHasGarbage(page);
		GistMarkTuplesDeleted(page);

		PageSetLSN(page, lsn);
		MarkBufferDirty(buffer);
	}

	if (BufferIsValid(buffer))
		UnlockReleaseBuffer(buffer);
}
216 :
217 : /*
218 : * Returns an array of index pointers.
219 : */
220 : static IndexTuple *
221 915 : decodePageSplitRecord(char *begin, int len, int *n)
222 : {
223 : char *ptr;
224 915 : int i = 0;
225 : IndexTuple *tuples;
226 :
227 : /* extract the number of tuples */
228 915 : memcpy(n, begin, sizeof(int));
229 915 : ptr = begin + sizeof(int);
230 :
231 915 : tuples = palloc(*n * sizeof(IndexTuple));
232 :
233 79647 : for (i = 0; i < *n; i++)
234 : {
235 : Assert(ptr - begin < len);
236 78732 : tuples[i] = (IndexTuple) ptr;
237 78732 : ptr += IndexTupleSize((IndexTuple) ptr);
238 : }
239 : Assert(ptr - begin == len);
240 :
241 915 : return tuples;
242 : }
243 :
/*
 * Replay a page split.  Block 0 (optional) is the left child whose
 * follow-right flag must be cleared; blocks 1..npage are the pages produced
 * by the split, each re-initialized from the tuples in its block data.
 */
static void
gistRedoPageSplitRecord(XLogReaderState *record)
{
	XLogRecPtr	lsn = record->EndRecPtr;
	gistxlogPageSplit *xldata = (gistxlogPageSplit *) XLogRecGetData(record);
	Buffer		firstbuffer = InvalidBuffer;
	Buffer		buffer;
	Page		page;
	int			i;
	bool		isrootsplit = false;

	/*
	 * We must hold lock on the first-listed page throughout the action,
	 * including while updating the left child page (if any).  We can unlock
	 * remaining pages in the list as soon as they've been written, because
	 * there is no path for concurrent queries to reach those pages without
	 * first visiting the first-listed page.
	 */

	/* loop around all pages */
	for (i = 0; i < xldata->npage; i++)
	{
		int			flags;
		char	   *data;
		Size		datalen;
		int			num;
		BlockNumber blkno;
		IndexTuple *tuples;

		XLogRecGetBlockTag(record, i + 1, NULL, NULL, &blkno);
		if (blkno == GIST_ROOT_BLKNO)
		{
			/* the root, if split, is always the first-listed page */
			Assert(i == 0);
			isrootsplit = true;
		}

		buffer = XLogInitBufferForRedo(record, i + 1);
		page = BufferGetPage(buffer);
		data = XLogRecGetBlockData(record, i + 1, &datalen);

		tuples = decodePageSplitRecord(data, datalen, &num);

		/* ok, clear buffer */
		if (xldata->origleaf && blkno != GIST_ROOT_BLKNO)
			flags = F_LEAF;
		else
			flags = 0;
		GISTInitBuffer(buffer, flags);

		/* and fill it */
		gistfillbuffer(page, tuples, num, FirstOffsetNumber);

		if (blkno == GIST_ROOT_BLKNO)
		{
			/* the root has no right sibling and never follows right */
			GistPageGetOpaque(page)->rightlink = InvalidBlockNumber;
			GistPageSetNSN(page, xldata->orignsn);
			GistClearFollowRight(page);
		}
		else
		{
			/* chain the split pages together via rightlinks */
			if (i < xldata->npage - 1)
			{
				BlockNumber nextblkno;

				XLogRecGetBlockTag(record, i + 2, NULL, NULL, &nextblkno);
				GistPageGetOpaque(page)->rightlink = nextblkno;
			}
			else
				GistPageGetOpaque(page)->rightlink = xldata->origrlink;
			GistPageSetNSN(page, xldata->orignsn);
			/* all but the last page follow right, unless this is a root split */
			if (i < xldata->npage - 1 && !isrootsplit &&
				xldata->markfollowright)
				GistMarkFollowRight(page);
			else
				GistClearFollowRight(page);
		}

		PageSetLSN(page, lsn);
		MarkBufferDirty(buffer);

		if (i == 0)
			firstbuffer = buffer;
		else
			UnlockReleaseBuffer(buffer);
	}

	/* Fix follow-right data on left child page, if any */
	if (XLogRecHasBlockRef(record, 0))
		gistRedoClearFollowRight(record, 0);

	/* Finally, release lock on the first page */
	UnlockReleaseBuffer(firstbuffer);
}
337 :
338 : /* redo page deletion */
339 : static void
340 81 : gistRedoPageDelete(XLogReaderState *record)
341 : {
342 81 : XLogRecPtr lsn = record->EndRecPtr;
343 81 : gistxlogPageDelete *xldata = (gistxlogPageDelete *) XLogRecGetData(record);
344 : Buffer parentBuffer;
345 : Buffer leafBuffer;
346 :
347 81 : if (XLogReadBufferForRedo(record, 0, &leafBuffer) == BLK_NEEDS_REDO)
348 : {
349 81 : Page page = BufferGetPage(leafBuffer);
350 :
351 81 : GistPageSetDeleted(page, xldata->deleteXid);
352 :
353 81 : PageSetLSN(page, lsn);
354 81 : MarkBufferDirty(leafBuffer);
355 : }
356 :
357 81 : if (XLogReadBufferForRedo(record, 1, &parentBuffer) == BLK_NEEDS_REDO)
358 : {
359 80 : Page page = BufferGetPage(parentBuffer);
360 :
361 80 : PageIndexTupleDelete(page, xldata->downlinkOffset);
362 :
363 80 : PageSetLSN(page, lsn);
364 80 : MarkBufferDirty(parentBuffer);
365 : }
366 :
367 81 : if (BufferIsValid(parentBuffer))
368 81 : UnlockReleaseBuffer(parentBuffer);
369 81 : if (BufferIsValid(leafBuffer))
370 81 : UnlockReleaseBuffer(leafBuffer);
371 81 : }
372 :
/*
 * Replay a PAGE_REUSE record.  This touches no pages; it exists purely to
 * kill conflicting standby queries before a deleted page is recycled.
 */
static void
gistRedoPageReuse(XLogReaderState *record)
{
	gistxlogPageReuse *xlrec = (gistxlogPageReuse *) XLogRecGetData(record);

	/*
	 * PAGE_REUSE records exist to provide a conflict point when we reuse
	 * pages in the index via the FSM. That's all they do though.
	 *
	 * snapshotConflictHorizon was the page's deleteXid. The
	 * GlobalVisCheckRemovableFullXid(deleteXid) test in gistPageRecyclable()
	 * conceptually mirrors the PGPROC->xmin > limitXmin test in
	 * GetConflictingVirtualXIDs(). Consequently, one XID value achieves the
	 * same exclusion effect on primary and standby.
	 */
	if (InHotStandby)
		ResolveRecoveryConflictWithSnapshotFullXid(xlrec->snapshotConflictHorizon,
												   xlrec->isCatalogRel,
												   xlrec->locator);
}
393 :
/*
 * Main entry point for GiST WAL replay: dispatch on the record's info bits.
 * All per-record work runs in opCtx, which is reset after each record.
 */
void
gist_redo(XLogReaderState *record)
{
	uint8		info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
	MemoryContext oldCxt;

	/*
	 * No recovery-conflict processing is needed at this level; the record
	 * types that can conflict with Hot Standby queries (XLOG_GIST_DELETE
	 * and XLOG_GIST_PAGE_REUSE) resolve conflicts inside their own redo
	 * routines.  NB: If we ever implement a similar optimization we have in
	 * b-tree, and remove killed tuples outside VACUUM, we'll need to handle
	 * that here.
	 */

	oldCxt = MemoryContextSwitchTo(opCtx);
	switch (info)
	{
		case XLOG_GIST_PAGE_UPDATE:
			gistRedoPageUpdateRecord(record);
			break;
		case XLOG_GIST_DELETE:
			gistRedoDeleteRecord(record);
			break;
		case XLOG_GIST_PAGE_REUSE:
			gistRedoPageReuse(record);
			break;
		case XLOG_GIST_PAGE_SPLIT:
			gistRedoPageSplitRecord(record);
			break;
		case XLOG_GIST_PAGE_DELETE:
			gistRedoPageDelete(record);
			break;
		default:
			elog(PANIC, "gist_redo: unknown op code %u", info);
	}

	MemoryContextSwitchTo(oldCxt);
	MemoryContextReset(opCtx);
}
431 :
/* Create the working memory context used by gist_redo during WAL replay. */
void
gist_xlog_startup(void)
{
	opCtx = createTempGistContext();
}
437 :
/* Release the working memory context created by gist_xlog_startup. */
void
gist_xlog_cleanup(void)
{
	MemoryContextDelete(opCtx);
}
443 :
/*
 * Mask a Gist page before running consistency checks on it.
 *
 * Hides fields that may legitimately differ between primary and standby.
 * 'pagedata' is the raw page image; 'blkno' is unused here but is part of
 * the resource manager's mask-callback signature.
 */
void
gist_mask(char *pagedata, BlockNumber blkno)
{
	Page		page = (Page) pagedata;

	mask_page_lsn_and_checksum(page);

	mask_page_hint_bits(page);
	mask_unused_space(page);

	/*
	 * NSN is nothing but a special purpose LSN. Hence, mask it for the same
	 * reason as mask_page_lsn_and_checksum.
	 */
	GistPageSetNSN(page, (uint64) MASK_MARKER);

	/*
	 * We update F_FOLLOW_RIGHT flag on the left child after writing WAL
	 * record. Hence, mask this flag. See gistplacetopage() for details.
	 */
	GistMarkFollowRight(page);

	if (GistPageIsLeaf(page))
	{
		/*
		 * In gist leaf pages, it is possible to modify the LP_FLAGS without
		 * emitting any WAL record. Hence, mask the line pointer flags. See
		 * gistkillitems() for details.
		 */
		mask_lp_flags(page);
	}

	/*
	 * During gist redo, we never mark a page as garbage. Hence, mask it to
	 * ignore any differences.
	 */
	GistClearPageHasGarbage(page);
}
485 :
486 : /*
487 : * Write WAL record of a page split.
488 : */
489 : XLogRecPtr
490 1743 : gistXLogSplit(bool page_is_leaf,
491 : SplitPageLayout *dist,
492 : BlockNumber origrlink, GistNSN orignsn,
493 : Buffer leftchildbuf, bool markfollowright)
494 : {
495 : gistxlogPageSplit xlrec;
496 : SplitPageLayout *ptr;
497 1743 : int npage = 0;
498 : XLogRecPtr recptr;
499 : int i;
500 :
501 5285 : for (ptr = dist; ptr; ptr = ptr->next)
502 3542 : npage++;
503 :
504 1743 : xlrec.origrlink = origrlink;
505 1743 : xlrec.orignsn = orignsn;
506 1743 : xlrec.origleaf = page_is_leaf;
507 1743 : xlrec.npage = (uint16) npage;
508 1743 : xlrec.markfollowright = markfollowright;
509 :
510 1743 : XLogBeginInsert();
511 :
512 : /*
513 : * Include a full page image of the child buf. (only necessary if a
514 : * checkpoint happened since the child page was split)
515 : */
516 1743 : if (BufferIsValid(leftchildbuf))
517 6 : XLogRegisterBuffer(0, leftchildbuf, REGBUF_STANDARD);
518 :
519 : /*
520 : * NOTE: We register a lot of data. The caller must've called
521 : * XLogEnsureRecordSpace() to prepare for that. We cannot do it here,
522 : * because we're already in a critical section. If you change the number
523 : * of buffer or data registrations here, make sure you modify the
524 : * XLogEnsureRecordSpace() calls accordingly!
525 : */
526 1743 : XLogRegisterData(&xlrec, sizeof(gistxlogPageSplit));
527 :
528 1743 : i = 1;
529 5285 : for (ptr = dist; ptr; ptr = ptr->next)
530 : {
531 3542 : XLogRegisterBuffer(i, ptr->buffer, REGBUF_WILL_INIT);
532 3542 : XLogRegisterBufData(i, &(ptr->block.num), sizeof(int));
533 3542 : XLogRegisterBufData(i, ptr->list, ptr->lenlist);
534 3542 : i++;
535 : }
536 :
537 1743 : recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_SPLIT);
538 :
539 1743 : return recptr;
540 : }
541 :
542 : /*
543 : * Write XLOG record describing a page deletion. This also includes removal of
544 : * downlink from the parent page.
545 : */
546 : XLogRecPtr
547 81 : gistXLogPageDelete(Buffer buffer, FullTransactionId xid,
548 : Buffer parentBuffer, OffsetNumber downlinkOffset)
549 : {
550 : gistxlogPageDelete xlrec;
551 : XLogRecPtr recptr;
552 :
553 81 : xlrec.deleteXid = xid;
554 81 : xlrec.downlinkOffset = downlinkOffset;
555 :
556 81 : XLogBeginInsert();
557 81 : XLogRegisterData(&xlrec, SizeOfGistxlogPageDelete);
558 :
559 81 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
560 81 : XLogRegisterBuffer(1, parentBuffer, REGBUF_STANDARD);
561 :
562 81 : recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_DELETE);
563 :
564 81 : return recptr;
565 : }
566 :
567 : /*
568 : * Write XLOG record about reuse of a deleted page.
569 : */
570 : void
571 0 : gistXLogPageReuse(Relation rel, Relation heaprel,
572 : BlockNumber blkno, FullTransactionId deleteXid)
573 : {
574 : gistxlogPageReuse xlrec_reuse;
575 :
576 : /*
577 : * Note that we don't register the buffer with the record, because this
578 : * operation doesn't modify the page. This record only exists to provide a
579 : * conflict point for Hot Standby.
580 : */
581 :
582 : /* XLOG stuff */
583 0 : xlrec_reuse.isCatalogRel = RelationIsAccessibleInLogicalDecoding(heaprel);
584 0 : xlrec_reuse.locator = rel->rd_locator;
585 0 : xlrec_reuse.block = blkno;
586 0 : xlrec_reuse.snapshotConflictHorizon = deleteXid;
587 :
588 0 : XLogBeginInsert();
589 0 : XLogRegisterData(&xlrec_reuse, SizeOfGistxlogPageReuse);
590 :
591 0 : XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_REUSE);
592 0 : }
593 :
594 : /*
595 : * Write XLOG record describing a page update. The update can include any
596 : * number of deletions and/or insertions of tuples on a single index page.
597 : *
598 : * If this update inserts a downlink for a split page, also record that
599 : * the F_FOLLOW_RIGHT flag on the child page is cleared and NSN set.
600 : *
601 : * Note that both the todelete array and the tuples are marked as belonging
602 : * to the target buffer; they need not be stored in XLOG if XLogInsert decides
603 : * to log the whole buffer contents instead.
604 : */
605 : XLogRecPtr
606 247558 : gistXLogUpdate(Buffer buffer,
607 : OffsetNumber *todelete, int ntodelete,
608 : IndexTuple *itup, int ituplen,
609 : Buffer leftchildbuf)
610 : {
611 : gistxlogPageUpdate xlrec;
612 : int i;
613 : XLogRecPtr recptr;
614 :
615 247558 : xlrec.ntodelete = ntodelete;
616 247558 : xlrec.ntoinsert = ituplen;
617 :
618 247558 : XLogBeginInsert();
619 247558 : XLogRegisterData(&xlrec, sizeof(gistxlogPageUpdate));
620 :
621 247558 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
622 247558 : XLogRegisterBufData(0, todelete, sizeof(OffsetNumber) * ntodelete);
623 :
624 : /* new tuples */
625 496562 : for (i = 0; i < ituplen; i++)
626 249004 : XLogRegisterBufData(0, itup[i], IndexTupleSize(itup[i]));
627 :
628 : /*
629 : * Include a full page image of the child buf. (only necessary if a
630 : * checkpoint happened since the child page was split)
631 : */
632 247558 : if (BufferIsValid(leftchildbuf))
633 1681 : XLogRegisterBuffer(1, leftchildbuf, REGBUF_STANDARD);
634 :
635 247558 : recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_UPDATE);
636 :
637 247558 : return recptr;
638 : }
639 :
640 : /*
641 : * Write XLOG record describing a delete of leaf index tuples marked as DEAD
642 : * during new tuple insertion. One may think that this case is already covered
643 : * by gistXLogUpdate(). But deletion of index tuples might conflict with
644 : * standby queries and needs special handling.
645 : */
646 : XLogRecPtr
647 0 : gistXLogDelete(Buffer buffer, OffsetNumber *todelete, int ntodelete,
648 : TransactionId snapshotConflictHorizon, Relation heaprel)
649 : {
650 : gistxlogDelete xlrec;
651 : XLogRecPtr recptr;
652 :
653 0 : xlrec.isCatalogRel = RelationIsAccessibleInLogicalDecoding(heaprel);
654 0 : xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
655 0 : xlrec.ntodelete = ntodelete;
656 :
657 0 : XLogBeginInsert();
658 0 : XLogRegisterData(&xlrec, SizeOfGistxlogDelete);
659 :
660 : /*
661 : * We need the target-offsets array whether or not we store the whole
662 : * buffer, to allow us to find the snapshotConflictHorizon on a standby
663 : * server.
664 : */
665 0 : XLogRegisterData(todelete, ntodelete * sizeof(OffsetNumber));
666 :
667 0 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
668 :
669 0 : recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_DELETE);
670 :
671 0 : return recptr;
672 : }
|