Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * gistxlog.c
4 : * WAL replay logic for GiST.
5 : *
6 : *
7 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
8 : * Portions Copyright (c) 1994, Regents of the University of California
9 : *
10 : * IDENTIFICATION
11 : * src/backend/access/gist/gistxlog.c
12 : *-------------------------------------------------------------------------
13 : */
14 : #include "postgres.h"
15 :
16 : #include "access/bufmask.h"
17 : #include "access/gist_private.h"
18 : #include "access/gistxlog.h"
19 : #include "access/transam.h"
20 : #include "access/xloginsert.h"
21 : #include "access/xlogutils.h"
22 : #include "storage/standby.h"
23 : #include "utils/memutils.h"
24 : #include "utils/rel.h"
25 :
26 : static MemoryContext opCtx; /* working memory for operations */
27 :
28 : /*
29 : * Replay the clearing of F_FOLLOW_RIGHT flag on a child page.
30 : *
31 : * Even if the WAL record includes a full-page image, we have to update the
32 : * follow-right flag, because that change is not included in the full-page
33 : * image. To be sure that the intermediate state with the wrong flag value is
34 : * not visible to concurrent Hot Standby queries, this function handles
35 : * restoring the full-page image as well as updating the flag. (Note that
36 : * we never need to do anything else to the child page in the current WAL
37 : * action.)
38 : */
39 : static void
40 900 : gistRedoClearFollowRight(XLogReaderState *record, uint8 block_id)
41 : {
42 900 : XLogRecPtr lsn = record->EndRecPtr;
43 : Buffer buffer;
44 : Page page;
45 : XLogRedoAction action;
46 :
47 : /*
48 : * Note that we still update the page even if it was restored from a full
49 : * page image, because the updated NSN is not included in the image.
50 : */
51 900 : action = XLogReadBufferForRedo(record, block_id, &buffer);
52 900 : if (action == BLK_NEEDS_REDO || action == BLK_RESTORED)
53 : {
54 900 : page = BufferGetPage(buffer);
55 :
56 900 : GistPageSetNSN(page, lsn);
57 900 : GistClearFollowRight(page);
58 :
59 900 : PageSetLSN(page, lsn);
60 900 : MarkBufferDirty(buffer);
61 : }
62 900 : if (BufferIsValid(buffer))
63 900 : UnlockReleaseBuffer(buffer);
64 900 : }
65 :
66 : /*
67 : * redo any page update (except page split)
68 : */
69 : static void
70 115034 : gistRedoPageUpdateRecord(XLogReaderState *record)
71 : {
72 115034 : XLogRecPtr lsn = record->EndRecPtr;
73 115034 : gistxlogPageUpdate *xldata = (gistxlogPageUpdate *) XLogRecGetData(record);
74 : Buffer buffer;
75 : Page page;
76 :
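: /*
:  * Block 0 is the page being updated; block 1, if present, is the left
:  * child page whose F_FOLLOW_RIGHT flag is cleared below.
:  */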
77 115034 : if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
78 : {
79 : char *begin;
80 : char *data;
81 : Size datalen;
82 114282 : int ninserted PG_USED_FOR_ASSERTS_ONLY = 0;
83 :
84 114282 : data = begin = XLogRecGetBlockData(record, 0, &datalen);
85 :
86 114282 : page = BufferGetPage(buffer);
87 :
88 114282 : if (xldata->ntodelete == 1 && xldata->ntoinsert == 1)
89 41726 : {
90 : /*
91 : * When replacing one tuple with one other tuple, we must use
92 : * PageIndexTupleOverwrite for consistency with gistplacetopage.
93 : */
94 41726 : OffsetNumber offnum = *((OffsetNumber *) data);
95 : IndexTuple itup;
96 : Size itupsize;
97 :
98 41726 : data += sizeof(OffsetNumber);
99 41726 : itup = (IndexTuple) data;
100 41726 : itupsize = IndexTupleSize(itup);
101 41726 : if (!PageIndexTupleOverwrite(page, offnum, itup, itupsize))
102 0 : elog(ERROR, "failed to add item to GiST index page, size %zu bytes", itupsize);
103 41726 : data += itupsize;
104 : /* should be nothing left after consuming 1 tuple */
105 : Assert(data - begin == datalen);
106 : /* update insertion count for assert check below */
107 41726 : ninserted++;
108 : }
109 72556 : else if (xldata->ntodelete > 0)
110 : {
111 : /* Otherwise, delete old tuples if any */
112 898 : OffsetNumber *todelete = (OffsetNumber *) data;
113 :
114 898 : data += sizeof(OffsetNumber) * xldata->ntodelete;
115 :
116 898 : PageIndexMultiDelete(page, todelete, xldata->ntodelete);
117 898 : if (GistPageIsLeaf(page))
118 14 : GistMarkTuplesDeleted(page);
119 : }
120 :
121 : /* Add new tuples if any */
122 114282 : if (data - begin < datalen)
123 : {
124 72542 : OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber :
125 72412 : OffsetNumberNext(PageGetMaxOffsetNumber(page));
126 :
127 145968 : while (data - begin < datalen)
128 : {
129 73426 : IndexTuple itup = (IndexTuple) data;
130 73426 : Size sz = IndexTupleSize(itup);
131 : OffsetNumber l;
132 :
133 73426 : data += sz;
134 :
135 73426 : l = PageAddItem(page, itup, sz, off, false, false);
136 73426 : if (l == InvalidOffsetNumber)
137 0 : elog(ERROR, "failed to add item to GiST index page, size %zu bytes", sz);
138 73426 : off++;
139 73426 : ninserted++;
140 : }
141 : }
142 :
143 : /* Check that XLOG record contained expected number of tuples */
144 : Assert(ninserted == xldata->ntoinsert);
145 :
146 114282 : PageSetLSN(page, lsn);
147 114282 : MarkBufferDirty(buffer);
148 : }
149 :
150 : /*
151 : * Fix follow-right data on left child page
152 : *
153 : * This must be done while still holding the lock on the target page. Note
154 : * that even if the target page no longer exists, we still attempt to
155 : * replay the change on the child page.
156 : */
157 115034 : if (XLogRecHasBlockRef(record, 1))
158 896 : gistRedoClearFollowRight(record, 1);
159 :
160 115034 : if (BufferIsValid(buffer))
161 115034 : UnlockReleaseBuffer(buffer);
162 115034 : }
163 :
164 :
165 : /*
166 : * redo delete on gist index page to remove tuples marked as DEAD during index
167 : * tuple insertion
168 : */
169 : static void
170 0 : gistRedoDeleteRecord(XLogReaderState *record)
171 : {
172 0 : XLogRecPtr lsn = record->EndRecPtr;
173 0 : gistxlogDelete *xldata = (gistxlogDelete *) XLogRecGetData(record);
174 : Buffer buffer;
175 : Page page;
176 0 : OffsetNumber *toDelete = xldata->offsets;
177 :
178 : /*
179 : * If we have any conflict processing to do, it must happen before we
180 : * update the page.
181 : *
182 : * GiST delete records can conflict with standby queries. You might think
183 : * that vacuum records would conflict as well, but we've handled that
184 : * already. XLOG_HEAP2_PRUNE_VACUUM_SCAN records provide the highest xid
185 : * cleaned by the vacuum of the heap and so we can resolve any conflicts
186 : * just once when that arrives. After that we know that no conflicts
187 : * exist from individual gist vacuum records on that index.
188 : */
189 0 : if (InHotStandby)
190 : {
191 : RelFileLocator rlocator;
192 :
193 0 : XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
194 :
195 0 : ResolveRecoveryConflictWithSnapshot(xldata->snapshotConflictHorizon,
196 0 : xldata->isCatalogRel,
197 : rlocator);
198 : }
199 :
200 0 : if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
201 : {
202 0 : page = BufferGetPage(buffer);
203 :
204 0 : PageIndexMultiDelete(page, toDelete, xldata->ntodelete);
205 :
206 0 : GistClearPageHasGarbage(page);
207 0 : GistMarkTuplesDeleted(page);
208 :
209 0 : PageSetLSN(page, lsn);
210 0 : MarkBufferDirty(buffer);
211 : }
212 :
213 0 : if (BufferIsValid(buffer))
214 0 : UnlockReleaseBuffer(buffer);
215 0 : }
216 :
217 : /*
218 : * Decode the tuples from a page-split record's block data. Returns an array of pointers into *begin (the tuples are not copied); *n is set to the tuple count.
219 : */
220 : static IndexTuple *
221 1830 : decodePageSplitRecord(char *begin, int len, int *n)
222 : {
223 : char *ptr;
224 1830 : int i = 0;
225 : IndexTuple *tuples;
226 :
227 : /* extract the number of tuples */
228 1830 : memcpy(n, begin, sizeof(int));
229 1830 : ptr = begin + sizeof(int);
230 :
231 1830 : tuples = palloc(*n * sizeof(IndexTuple));
232 :
233 159294 : for (i = 0; i < *n; i++)
234 : {
235 : Assert(ptr - begin < len);
236 157464 : tuples[i] = (IndexTuple) ptr;
237 157464 : ptr += IndexTupleSize((IndexTuple) ptr);
238 : }
239 : Assert(ptr - begin == len);
240 :
241 1830 : return tuples;
242 : }
243 :
244 : static void
245 910 : gistRedoPageSplitRecord(XLogReaderState *record)
246 : {
247 910 : XLogRecPtr lsn = record->EndRecPtr;
248 910 : gistxlogPageSplit *xldata = (gistxlogPageSplit *) XLogRecGetData(record);
249 910 : Buffer firstbuffer = InvalidBuffer;
250 : Buffer buffer;
251 : Page page;
252 : int i;
253 910 : bool isrootsplit = false;
254 :
255 : /*
256 : * We must hold lock on the first-listed page throughout the action,
257 : * including while updating the left child page (if any). We can unlock
258 : * remaining pages in the list as soon as they've been written, because
259 : * there is no path for concurrent queries to reach those pages without
260 : * first visiting the first-listed page.
261 : */
262 :
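: /*
:  * The split pages are registered as blocks 1..npage; block 0, if present,
:  * is the left child page (handled after the loop).
:  */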
263 : /* loop around all pages */
264 2740 : for (i = 0; i < xldata->npage; i++)
265 : {
266 : int flags;
267 : char *data;
268 : Size datalen;
269 : int num;
270 : BlockNumber blkno;
271 : IndexTuple *tuples;
272 :
273 1830 : XLogRecGetBlockTag(record, i + 1, NULL, NULL, &blkno);
274 1830 : if (blkno == GIST_ROOT_BLKNO)
275 : {
276 : Assert(i == 0);
277 10 : isrootsplit = true;
278 : }
279 :
280 1830 : buffer = XLogInitBufferForRedo(record, i + 1);
281 1830 : page = BufferGetPage(buffer);
282 1830 : data = XLogRecGetBlockData(record, i + 1, &datalen);
283 :
284 1830 : tuples = decodePageSplitRecord(data, datalen, &num);
285 :
286 : /* clear the page and re-initialize it with the right flags */
287 1830 : if (xldata->origleaf && blkno != GIST_ROOT_BLKNO)
288 1812 : flags = F_LEAF;
289 : else
290 18 : flags = 0;
291 1830 : GISTInitBuffer(buffer, flags);
292 :
293 : /* and fill it */
294 1830 : gistfillbuffer(page, tuples, num, FirstOffsetNumber);
295 :
296 1830 : if (blkno == GIST_ROOT_BLKNO)
297 : {
298 10 : GistPageGetOpaque(page)->rightlink = InvalidBlockNumber;
299 10 : GistPageSetNSN(page, xldata->orignsn);
300 10 : GistClearFollowRight(page);
301 : }
302 : else
303 : {
304 1820 : if (i < xldata->npage - 1)
305 : {
306 : BlockNumber nextblkno;
307 :
308 910 : XLogRecGetBlockTag(record, i + 2, NULL, NULL, &nextblkno);
309 910 : GistPageGetOpaque(page)->rightlink = nextblkno;
310 : }
311 : else
312 910 : GistPageGetOpaque(page)->rightlink = xldata->origrlink;
313 1820 : GistPageSetNSN(page, xldata->orignsn);
314 1820 : if (i < xldata->npage - 1 && !isrootsplit &&
315 900 : xldata->markfollowright)
316 900 : GistMarkFollowRight(page);
317 : else
318 920 : GistClearFollowRight(page);
319 : }
320 :
321 1830 : PageSetLSN(page, lsn);
322 1830 : MarkBufferDirty(buffer);
323 :
324 1830 : if (i == 0)
325 910 : firstbuffer = buffer;
326 : else
327 920 : UnlockReleaseBuffer(buffer);
328 : }
329 :
330 : /* Fix follow-right data on left child page, if any */
331 910 : if (XLogRecHasBlockRef(record, 0))
332 4 : gistRedoClearFollowRight(record, 0);
333 :
334 : /* Finally, release lock on the first page */
335 910 : UnlockReleaseBuffer(firstbuffer);
336 910 : }
337 :
338 : /* redo page deletion */
339 : static void
340 162 : gistRedoPageDelete(XLogReaderState *record)
341 : {
342 162 : XLogRecPtr lsn = record->EndRecPtr;
343 162 : gistxlogPageDelete *xldata = (gistxlogPageDelete *) XLogRecGetData(record);
344 : Buffer parentBuffer;
345 : Buffer leafBuffer;
346 :
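: /* Block 0 is the leaf page being deleted; stamp it with the delete XID */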
347 162 : if (XLogReadBufferForRedo(record, 0, &leafBuffer) == BLK_NEEDS_REDO)
348 : {
349 162 : Page page = BufferGetPage(leafBuffer);
350 :
351 162 : GistPageSetDeleted(page, xldata->deleteXid);
352 :
353 162 : PageSetLSN(page, lsn);
354 162 : MarkBufferDirty(leafBuffer);
355 : }
356 :
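: /* Block 1 is the parent page; remove the downlink to the deleted page */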
357 162 : if (XLogReadBufferForRedo(record, 1, &parentBuffer) == BLK_NEEDS_REDO)
358 : {
359 160 : Page page = BufferGetPage(parentBuffer);
360 :
361 160 : PageIndexTupleDelete(page, xldata->downlinkOffset);
362 :
363 160 : PageSetLSN(page, lsn);
364 160 : MarkBufferDirty(parentBuffer);
365 : }
366 :
367 162 : if (BufferIsValid(parentBuffer))
368 162 : UnlockReleaseBuffer(parentBuffer);
369 162 : if (BufferIsValid(leafBuffer))
370 162 : UnlockReleaseBuffer(leafBuffer);
371 162 : }
372 :
373 : static void
374 0 : gistRedoPageReuse(XLogReaderState *record)
375 : {
376 0 : gistxlogPageReuse *xlrec = (gistxlogPageReuse *) XLogRecGetData(record);
377 :
378 : /*
379 : * PAGE_REUSE records exist to provide a conflict point when we reuse
380 : * pages in the index via the FSM. That's all they do though.
381 : *
382 : * snapshotConflictHorizon was the page's deleteXid. The
383 : * GlobalVisCheckRemovableFullXid(deleteXid) test in gistPageRecyclable()
384 : * conceptually mirrors the PGPROC->xmin > limitXmin test in
385 : * GetConflictingVirtualXIDs(). Consequently, one XID value achieves the
386 : * same exclusion effect on primary and standby.
387 : */
388 0 : if (InHotStandby)
389 0 : ResolveRecoveryConflictWithSnapshotFullXid(xlrec->snapshotConflictHorizon,
390 0 : xlrec->isCatalogRel,
391 : xlrec->locator);
392 0 : }
393 :
394 : void
395 116106 : gist_redo(XLogReaderState *record)
396 : {
397 116106 : uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
398 : MemoryContext oldCxt;
399 :
400 : /*
401 : * GiST indexes do not require any conflict processing. NB: If we ever
402 : * implement an optimization similar to the one we have in b-tree, and
403 : * remove killed tuples outside VACUUM, we'll need to handle that here.
404 : */
405 :
406 116106 : oldCxt = MemoryContextSwitchTo(opCtx);
407 116106 : switch (info)
408 : {
409 115034 : case XLOG_GIST_PAGE_UPDATE:
410 115034 : gistRedoPageUpdateRecord(record);
411 115034 : break;
412 0 : case XLOG_GIST_DELETE:
413 0 : gistRedoDeleteRecord(record);
414 0 : break;
415 0 : case XLOG_GIST_PAGE_REUSE:
416 0 : gistRedoPageReuse(record);
417 0 : break;
418 910 : case XLOG_GIST_PAGE_SPLIT:
419 910 : gistRedoPageSplitRecord(record);
420 910 : break;
421 162 : case XLOG_GIST_PAGE_DELETE:
422 162 : gistRedoPageDelete(record);
423 162 : break;
424 0 : case XLOG_GIST_ASSIGN_LSN:
425 : /* nop. See gistGetFakeLSN(). */
426 0 : break;
427 0 : default:
428 0 : elog(PANIC, "gist_redo: unknown op code %u", info);
429 : }
430 :
431 116106 : MemoryContextSwitchTo(oldCxt);
432 116106 : MemoryContextReset(opCtx);
433 116106 : }
434 :
435 : void
436 416 : gist_xlog_startup(void)
437 : {
438 416 : opCtx = createTempGistContext();
439 416 : }
440 :
441 : void
442 300 : gist_xlog_cleanup(void)
443 : {
444 300 : MemoryContextDelete(opCtx);
445 300 : }
446 :
447 : /*
448 : * Mask a Gist page before running consistency checks on it.
449 : */
450 : void
451 234668 : gist_mask(char *pagedata, BlockNumber blkno)
452 : {
453 234668 : Page page = (Page) pagedata;
454 :
455 234668 : mask_page_lsn_and_checksum(page);
456 :
457 234668 : mask_page_hint_bits(page);
458 234668 : mask_unused_space(page);
459 :
460 : /*
461 : * NSN is nothing but a special purpose LSN. Hence, mask it for the same
462 : * reason as mask_page_lsn_and_checksum.
463 : */
464 234668 : GistPageSetNSN(page, (uint64) MASK_MARKER);
465 :
466 : /*
467 : * We update F_FOLLOW_RIGHT flag on the left child after writing WAL
468 : * record. Hence, mask this flag. See gistplacetopage() for details.
469 : */
470 234668 : GistMarkFollowRight(page);
471 :
472 234668 : if (GistPageIsLeaf(page))
473 : {
474 : /*
475 : * In gist leaf pages, it is possible to modify the LP_FLAGS without
476 : * emitting any WAL record. Hence, mask the line pointer flags. See
477 : * gistkillitems() for details.
478 : */
479 149092 : mask_lp_flags(page);
480 : }
481 :
482 : /*
483 : * During gist redo, we never mark a page as garbage. Hence, mask it to
484 : * ignore any differences.
485 : */
486 234668 : GistClearPageHasGarbage(page);
487 234668 : }
488 :
489 : /*
490 : * Write WAL record of a page split.
491 : */
492 : XLogRecPtr
493 3472 : gistXLogSplit(bool page_is_leaf,
494 : SplitPageLayout *dist,
495 : BlockNumber origrlink, GistNSN orignsn,
496 : Buffer leftchildbuf, bool markfollowright)
497 : {
498 : gistxlogPageSplit xlrec;
499 : SplitPageLayout *ptr;
500 3472 : int npage = 0;
501 : XLogRecPtr recptr;
502 : int i;
503 :
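: /* count how many pages the split produced */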
504 10528 : for (ptr = dist; ptr; ptr = ptr->next)
505 7056 : npage++;
506 :
507 3472 : xlrec.origrlink = origrlink;
508 3472 : xlrec.orignsn = orignsn;
509 3472 : xlrec.origleaf = page_is_leaf;
510 3472 : xlrec.npage = (uint16) npage;
511 3472 : xlrec.markfollowright = markfollowright;
512 :
513 3472 : XLogBeginInsert();
514 :
515 : /*
516 : * Include a full-page image of the left child buffer. (Only necessary
517 : * if a checkpoint happened since the child page was split.)
518 : */
519 3472 : if (BufferIsValid(leftchildbuf))
520 12 : XLogRegisterBuffer(0, leftchildbuf, REGBUF_STANDARD);
521 :
522 : /*
523 : * NOTE: We register a lot of data. The caller must've called
524 : * XLogEnsureRecordSpace() to prepare for that. We cannot do it here,
525 : * because we're already in a critical section. If you change the number
526 : * of buffer or data registrations here, make sure you modify the
527 : * XLogEnsureRecordSpace() calls accordingly!
528 : */
529 3472 : XLogRegisterData(&xlrec, sizeof(gistxlogPageSplit));
530 :
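: /* register each page of the split as block 1..npage, in list order */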
531 3472 : i = 1;
532 10528 : for (ptr = dist; ptr; ptr = ptr->next)
533 : {
534 7056 : XLogRegisterBuffer(i, ptr->buffer, REGBUF_WILL_INIT);
535 7056 : XLogRegisterBufData(i, &(ptr->block.num), sizeof(int));
536 7056 : XLogRegisterBufData(i, ptr->list, ptr->lenlist);
537 7056 : i++;
538 : }
539 :
540 3472 : recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_SPLIT);
541 :
542 3472 : return recptr;
543 : }
544 :
545 : /*
546 : * Write XLOG record describing a page deletion. This also includes removal
547 : * of the downlink from the parent page.
548 : */
549 : XLogRecPtr
550 162 : gistXLogPageDelete(Buffer buffer, FullTransactionId xid,
551 : Buffer parentBuffer, OffsetNumber downlinkOffset)
552 : {
553 : gistxlogPageDelete xlrec;
554 : XLogRecPtr recptr;
555 :
556 162 : xlrec.deleteXid = xid;
557 162 : xlrec.downlinkOffset = downlinkOffset;
558 :
559 162 : XLogBeginInsert();
560 162 : XLogRegisterData(&xlrec, SizeOfGistxlogPageDelete);
561 :
562 162 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
563 162 : XLogRegisterBuffer(1, parentBuffer, REGBUF_STANDARD);
564 :
565 162 : recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_DELETE);
566 :
567 162 : return recptr;
568 : }
569 :
570 : /*
571 : * Write an empty XLOG record to assign a distinct LSN.
572 : */
573 : XLogRecPtr
574 0 : gistXLogAssignLSN(void)
575 : {
576 0 : int dummy = 0;
577 :
578 : /*
579 : * Records other than XLOG_SWITCH must have some content. We register a
580 : * dummy integer 0 to satisfy that requirement.
581 : */
582 0 : XLogBeginInsert();
583 0 : XLogSetRecordFlags(XLOG_MARK_UNIMPORTANT);
584 0 : XLogRegisterData(&dummy, sizeof(dummy));
585 0 : return XLogInsert(RM_GIST_ID, XLOG_GIST_ASSIGN_LSN);
586 : }
587 :
588 : /*
589 : * Write XLOG record about reuse of a deleted page.
590 : */
591 : void
592 0 : gistXLogPageReuse(Relation rel, Relation heaprel,
593 : BlockNumber blkno, FullTransactionId deleteXid)
594 : {
595 : gistxlogPageReuse xlrec_reuse;
596 :
597 : /*
598 : * Note that we don't register the buffer with the record, because this
599 : * operation doesn't modify the page. This record only exists to provide a
600 : * conflict point for Hot Standby.
601 : */
602 :
603 : /* XLOG stuff */
604 0 : xlrec_reuse.isCatalogRel = RelationIsAccessibleInLogicalDecoding(heaprel);
605 0 : xlrec_reuse.locator = rel->rd_locator;
606 0 : xlrec_reuse.block = blkno;
607 0 : xlrec_reuse.snapshotConflictHorizon = deleteXid;
608 :
609 0 : XLogBeginInsert();
610 0 : XLogRegisterData(&xlrec_reuse, SizeOfGistxlogPageReuse);
611 :
612 0 : XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_REUSE);
613 0 : }
614 :
615 : /*
616 : * Write XLOG record describing a page update. The update can include any
617 : * number of deletions and/or insertions of tuples on a single index page.
618 : *
619 : * If this update inserts a downlink for a split page, also record that
620 : * the F_FOLLOW_RIGHT flag on the child page is cleared and NSN set.
621 : *
622 : * Note that both the todelete array and the tuples are marked as belonging
623 : * to the target buffer; they need not be stored in XLOG if XLogInsert decides
624 : * to log the whole buffer contents instead.
625 : */
626 : XLogRecPtr
627 495142 : gistXLogUpdate(Buffer buffer,
628 : OffsetNumber *todelete, int ntodelete,
629 : IndexTuple *itup, int ituplen,
630 : Buffer leftchildbuf)
631 : {
632 : gistxlogPageUpdate xlrec;
633 : int i;
634 : XLogRecPtr recptr;
635 :
636 495142 : xlrec.ntodelete = ntodelete;
637 495142 : xlrec.ntoinsert = ituplen;
638 :
639 495142 : XLogBeginInsert();
640 495142 : XLogRegisterData(&xlrec, sizeof(gistxlogPageUpdate));
641 :
642 495142 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
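: /* offsets of tuples to delete; registered as buffer data so it can be omitted when a full-page image is logged */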
643 495142 : XLogRegisterBufData(0, todelete, sizeof(OffsetNumber) * ntodelete);
644 :
645 : /* new tuples */
646 993142 : for (i = 0; i < ituplen; i++)
647 498000 : XLogRegisterBufData(0, itup[i], IndexTupleSize(itup[i]));
648 :
649 : /*
650 : * Include a full-page image of the left child buffer. (Only necessary
651 : * if a checkpoint happened since the child page was split.)
652 : */
653 495142 : if (BufferIsValid(leftchildbuf))
654 3348 : XLogRegisterBuffer(1, leftchildbuf, REGBUF_STANDARD);
655 :
656 495142 : recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_UPDATE);
657 :
658 495142 : return recptr;
659 : }
660 :
661 : /*
662 : * Write XLOG record describing a delete of leaf index tuples marked as DEAD
663 : * during new tuple insertion. One might think this case is already covered
664 : * by gistXLogUpdate(), but deleting index tuples can conflict with
665 : * standby queries and therefore needs special conflict handling.
666 : */
667 : XLogRecPtr
668 0 : gistXLogDelete(Buffer buffer, OffsetNumber *todelete, int ntodelete,
669 : TransactionId snapshotConflictHorizon, Relation heaprel)
670 : {
671 : gistxlogDelete xlrec;
672 : XLogRecPtr recptr;
673 :
674 0 : xlrec.isCatalogRel = RelationIsAccessibleInLogicalDecoding(heaprel);
675 0 : xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
676 0 : xlrec.ntodelete = ntodelete;
677 :
678 0 : XLogBeginInsert();
679 0 : XLogRegisterData(&xlrec, SizeOfGistxlogDelete);
680 :
681 : /*
682 : * We need the target-offsets array whether or not we store the whole
683 : * buffer, to allow us to find the snapshotConflictHorizon on a standby
684 : * server.
685 : */
686 0 : XLogRegisterData(todelete, ntodelete * sizeof(OffsetNumber));
687 :
688 0 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
689 :
690 0 : recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_DELETE);
691 :
692 0 : return recptr;
693 : }