Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * gistxlog.c
4 : * WAL replay logic for GiST.
5 : *
6 : *
7 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
8 : * Portions Copyright (c) 1994, Regents of the University of California
9 : *
10 : * IDENTIFICATION
11 : * src/backend/access/gist/gistxlog.c
12 : *-------------------------------------------------------------------------
13 : */
14 : #include "postgres.h"
15 :
16 : #include "access/bufmask.h"
17 : #include "access/gist_private.h"
18 : #include "access/gistxlog.h"
19 : #include "access/transam.h"
20 : #include "access/xloginsert.h"
21 : #include "access/xlogutils.h"
22 : #include "storage/standby.h"
23 : #include "utils/memutils.h"
24 : #include "utils/rel.h"
25 :
26 : static MemoryContext opCtx; /* working memory for operations */
27 :
28 : /*
29 : * Replay the clearing of F_FOLLOW_RIGHT flag on a child page.
30 : *
31 : * Even if the WAL record includes a full-page image, we have to update the
32 : * follow-right flag, because that change is not included in the full-page
33 : * image. To be sure that the intermediate state with the wrong flag value is
34 : * not visible to concurrent Hot Standby queries, this function handles
35 : * restoring the full-page image as well as updating the flag. (Note that
36 : * we never need to do anything else to the child page in the current WAL
37 : * action.)
38 : */
39 : static void
40 886 : gistRedoClearFollowRight(XLogReaderState *record, uint8 block_id)
41 : {
42 886 : XLogRecPtr lsn = record->EndRecPtr;
43 : Buffer buffer;
44 : Page page;
45 : XLogRedoAction action;
46 :
47 : /*
48 : * Note that we still update the page even if it was restored from a full
49 : * page image, because the updated NSN is not included in the image.
50 : */
51 886 : action = XLogReadBufferForRedo(record, block_id, &buffer);
52 886 : if (action == BLK_NEEDS_REDO || action == BLK_RESTORED)
53 : {
54 886 : page = BufferGetPage(buffer);
55 :
56 886 : GistPageSetNSN(page, lsn);
57 886 : GistClearFollowRight(page);
58 :
59 886 : PageSetLSN(page, lsn);
60 886 : MarkBufferDirty(buffer);
61 : }
62 886 : if (BufferIsValid(buffer))
63 886 : UnlockReleaseBuffer(buffer);
64 886 : }
65 :
66 : /*
67 : * redo any page update (except page split)
68 : */
69 : static void
70 114650 : gistRedoPageUpdateRecord(XLogReaderState *record)
71 : {
72 114650 : XLogRecPtr lsn = record->EndRecPtr;
73 114650 : gistxlogPageUpdate *xldata = (gistxlogPageUpdate *) XLogRecGetData(record);
74 : Buffer buffer;
75 : Page page;
76 :
77 114650 : if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
78 : {
79 : char *begin;
80 : char *data;
81 : Size datalen;
82 114258 : int ninserted PG_USED_FOR_ASSERTS_ONLY = 0;
83 :
84 114258 : data = begin = XLogRecGetBlockData(record, 0, &datalen);
85 :
86 114258 : page = (Page) BufferGetPage(buffer);
87 :
88 114258 : if (xldata->ntodelete == 1 && xldata->ntoinsert == 1)
89 41694 : {
90 : /*
91 : * When replacing one tuple with one other tuple, we must use
92 : * PageIndexTupleOverwrite for consistency with gistplacetopage.
93 : */
94 41694 : OffsetNumber offnum = *((OffsetNumber *) data);
95 : IndexTuple itup;
96 : Size itupsize;
97 :
98 41694 : data += sizeof(OffsetNumber);
99 41694 : itup = (IndexTuple) data;
100 41694 : itupsize = IndexTupleSize(itup);
101 41694 : if (!PageIndexTupleOverwrite(page, offnum, (Item) itup, itupsize))
102 0 : elog(ERROR, "failed to add item to GiST index page, size %d bytes",
103 : (int) itupsize);
104 41694 : data += itupsize;
105 : /* should be nothing left after consuming 1 tuple */
106 : Assert(data - begin == datalen);
107 : /* update insertion count for assert check below */
108 41694 : ninserted++;
109 : }
110 72564 : else if (xldata->ntodelete > 0)
111 : {
112 : /* Otherwise, delete old tuples if any */
113 878 : OffsetNumber *todelete = (OffsetNumber *) data;
114 :
115 878 : data += sizeof(OffsetNumber) * xldata->ntodelete;
116 :
117 878 : PageIndexMultiDelete(page, todelete, xldata->ntodelete);
118 878 : if (GistPageIsLeaf(page))
119 0 : GistMarkTuplesDeleted(page);
120 : }
121 :
122 : /* Add new tuples if any */
123 114258 : if (data - begin < datalen)
124 : {
125 72564 : OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber :
126 72412 : OffsetNumberNext(PageGetMaxOffsetNumber(page));
127 :
128 146006 : while (data - begin < datalen)
129 : {
130 73442 : IndexTuple itup = (IndexTuple) data;
131 73442 : Size sz = IndexTupleSize(itup);
132 : OffsetNumber l;
133 :
134 73442 : data += sz;
135 :
136 73442 : l = PageAddItem(page, (Item) itup, sz, off, false, false);
137 73442 : if (l == InvalidOffsetNumber)
138 0 : elog(ERROR, "failed to add item to GiST index page, size %d bytes",
139 : (int) sz);
140 73442 : off++;
141 73442 : ninserted++;
142 : }
143 : }
144 :
145 : /* Check that XLOG record contained expected number of tuples */
146 : Assert(ninserted == xldata->ntoinsert);
147 :
148 114258 : PageSetLSN(page, lsn);
149 114258 : MarkBufferDirty(buffer);
150 : }
151 :
152 : /*
153 : * Fix follow-right data on left child page
154 : *
155 : * This must be done while still holding the lock on the target page. Note
156 : * that even if the target page no longer exists, we still attempt to
157 : * replay the change on the child page.
158 : */
159 114650 : if (XLogRecHasBlockRef(record, 1))
160 882 : gistRedoClearFollowRight(record, 1);
161 :
162 114650 : if (BufferIsValid(buffer))
163 114650 : UnlockReleaseBuffer(buffer);
164 114650 : }
165 :
166 :
167 : /*
168 : * redo delete on gist index page to remove tuples marked as DEAD during index
169 : * tuple insertion
170 : */
171 : static void
172 0 : gistRedoDeleteRecord(XLogReaderState *record)
173 : {
174 0 : XLogRecPtr lsn = record->EndRecPtr;
175 0 : gistxlogDelete *xldata = (gistxlogDelete *) XLogRecGetData(record);
176 : Buffer buffer;
177 : Page page;
178 0 : OffsetNumber *toDelete = xldata->offsets;
179 :
180 : /*
181 : * If we have any conflict processing to do, it must happen before we
182 : * update the page.
183 : *
184 : * GiST delete records can conflict with standby queries. You might think
185 : * that vacuum records would conflict as well, but we've handled that
186 : * already. XLOG_HEAP2_PRUNE_VACUUM_SCAN records provide the highest xid
187 : * cleaned by the vacuum of the heap and so we can resolve any conflicts
188 : * just once when that arrives. After that we know that no conflicts
189 : * exist from individual gist vacuum records on that index.
190 : */
191 0 : if (InHotStandby)
192 : {
193 : RelFileLocator rlocator;
194 :
195 0 : XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
196 :
197 0 : ResolveRecoveryConflictWithSnapshot(xldata->snapshotConflictHorizon,
198 0 : xldata->isCatalogRel,
199 : rlocator);
200 : }
201 :
202 0 : if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
203 : {
204 0 : page = (Page) BufferGetPage(buffer);
205 :
206 0 : PageIndexMultiDelete(page, toDelete, xldata->ntodelete);
207 :
208 0 : GistClearPageHasGarbage(page);
209 0 : GistMarkTuplesDeleted(page);
210 :
211 0 : PageSetLSN(page, lsn);
212 0 : MarkBufferDirty(buffer);
213 : }
214 :
215 0 : if (BufferIsValid(buffer))
216 0 : UnlockReleaseBuffer(buffer);
217 0 : }
218 :
219 : /*
220 : * Returns an array of index pointers.
221 : */
222 : static IndexTuple *
223 1802 : decodePageSplitRecord(char *begin, int len, int *n)
224 : {
225 : char *ptr;
226 1802 : int i = 0;
227 : IndexTuple *tuples;
228 :
229 : /* extract the number of tuples */
230 1802 : memcpy(n, begin, sizeof(int));
231 1802 : ptr = begin + sizeof(int);
232 :
233 1802 : tuples = palloc(*n * sizeof(IndexTuple));
234 :
235 156450 : for (i = 0; i < *n; i++)
236 : {
237 : Assert(ptr - begin < len);
238 154648 : tuples[i] = (IndexTuple) ptr;
239 154648 : ptr += IndexTupleSize((IndexTuple) ptr);
240 : }
241 : Assert(ptr - begin == len);
242 :
243 1802 : return tuples;
244 : }
245 :
246 : static void
247 896 : gistRedoPageSplitRecord(XLogReaderState *record)
248 : {
249 896 : XLogRecPtr lsn = record->EndRecPtr;
250 896 : gistxlogPageSplit *xldata = (gistxlogPageSplit *) XLogRecGetData(record);
251 896 : Buffer firstbuffer = InvalidBuffer;
252 : Buffer buffer;
253 : Page page;
254 : int i;
255 896 : bool isrootsplit = false;
256 :
257 : /*
258 : * We must hold lock on the first-listed page throughout the action,
259 : * including while updating the left child page (if any). We can unlock
260 : * remaining pages in the list as soon as they've been written, because
261 : * there is no path for concurrent queries to reach those pages without
262 : * first visiting the first-listed page.
263 : */
264 :
265 : /* loop around all pages */
266 2698 : for (i = 0; i < xldata->npage; i++)
267 : {
268 : int flags;
269 : char *data;
270 : Size datalen;
271 : int num;
272 : BlockNumber blkno;
273 : IndexTuple *tuples;
274 :
275 1802 : XLogRecGetBlockTag(record, i + 1, NULL, NULL, &blkno);
276 1802 : if (blkno == GIST_ROOT_BLKNO)
277 : {
278 : Assert(i == 0);
279 10 : isrootsplit = true;
280 : }
281 :
282 1802 : buffer = XLogInitBufferForRedo(record, i + 1);
283 1802 : page = (Page) BufferGetPage(buffer);
284 1802 : data = XLogRecGetBlockData(record, i + 1, &datalen);
285 :
286 1802 : tuples = decodePageSplitRecord(data, datalen, &num);
287 :
288 : /* ok, clear buffer */
289 1802 : if (xldata->origleaf && blkno != GIST_ROOT_BLKNO)
290 1784 : flags = F_LEAF;
291 : else
292 18 : flags = 0;
293 1802 : GISTInitBuffer(buffer, flags);
294 :
295 : /* and fill it */
296 1802 : gistfillbuffer(page, tuples, num, FirstOffsetNumber);
297 :
298 1802 : if (blkno == GIST_ROOT_BLKNO)
299 : {
300 10 : GistPageGetOpaque(page)->rightlink = InvalidBlockNumber;
301 10 : GistPageSetNSN(page, xldata->orignsn);
302 10 : GistClearFollowRight(page);
303 : }
304 : else
305 : {
306 1792 : if (i < xldata->npage - 1)
307 : {
308 : BlockNumber nextblkno;
309 :
310 896 : XLogRecGetBlockTag(record, i + 2, NULL, NULL, &nextblkno);
311 896 : GistPageGetOpaque(page)->rightlink = nextblkno;
312 : }
313 : else
314 896 : GistPageGetOpaque(page)->rightlink = xldata->origrlink;
315 1792 : GistPageSetNSN(page, xldata->orignsn);
316 1792 : if (i < xldata->npage - 1 && !isrootsplit &&
317 886 : xldata->markfollowright)
318 886 : GistMarkFollowRight(page);
319 : else
320 906 : GistClearFollowRight(page);
321 : }
322 :
323 1802 : PageSetLSN(page, lsn);
324 1802 : MarkBufferDirty(buffer);
325 :
326 1802 : if (i == 0)
327 896 : firstbuffer = buffer;
328 : else
329 906 : UnlockReleaseBuffer(buffer);
330 : }
331 :
332 : /* Fix follow-right data on left child page, if any */
333 896 : if (XLogRecHasBlockRef(record, 0))
334 4 : gistRedoClearFollowRight(record, 0);
335 :
336 : /* Finally, release lock on the first page */
337 896 : UnlockReleaseBuffer(firstbuffer);
338 896 : }
339 :
340 : /* redo page deletion */
341 : static void
342 0 : gistRedoPageDelete(XLogReaderState *record)
343 : {
344 0 : XLogRecPtr lsn = record->EndRecPtr;
345 0 : gistxlogPageDelete *xldata = (gistxlogPageDelete *) XLogRecGetData(record);
346 : Buffer parentBuffer;
347 : Buffer leafBuffer;
348 :
349 0 : if (XLogReadBufferForRedo(record, 0, &leafBuffer) == BLK_NEEDS_REDO)
350 : {
351 0 : Page page = (Page) BufferGetPage(leafBuffer);
352 :
353 0 : GistPageSetDeleted(page, xldata->deleteXid);
354 :
355 0 : PageSetLSN(page, lsn);
356 0 : MarkBufferDirty(leafBuffer);
357 : }
358 :
359 0 : if (XLogReadBufferForRedo(record, 1, &parentBuffer) == BLK_NEEDS_REDO)
360 : {
361 0 : Page page = (Page) BufferGetPage(parentBuffer);
362 :
363 0 : PageIndexTupleDelete(page, xldata->downlinkOffset);
364 :
365 0 : PageSetLSN(page, lsn);
366 0 : MarkBufferDirty(parentBuffer);
367 : }
368 :
369 0 : if (BufferIsValid(parentBuffer))
370 0 : UnlockReleaseBuffer(parentBuffer);
371 0 : if (BufferIsValid(leafBuffer))
372 0 : UnlockReleaseBuffer(leafBuffer);
373 0 : }
374 :
375 : static void
376 0 : gistRedoPageReuse(XLogReaderState *record)
377 : {
378 0 : gistxlogPageReuse *xlrec = (gistxlogPageReuse *) XLogRecGetData(record);
379 :
380 : /*
381 : * PAGE_REUSE records exist to provide a conflict point when we reuse
382 : * pages in the index via the FSM. That's all they do though.
383 : *
384 : * snapshotConflictHorizon was the page's deleteXid. The
385 : * GlobalVisCheckRemovableFullXid(deleteXid) test in gistPageRecyclable()
386 : * conceptually mirrors the PGPROC->xmin > limitXmin test in
387 : * GetConflictingVirtualXIDs(). Consequently, one XID value achieves the
388 : * same exclusion effect on primary and standby.
389 : */
390 0 : if (InHotStandby)
391 0 : ResolveRecoveryConflictWithSnapshotFullXid(xlrec->snapshotConflictHorizon,
392 0 : xlrec->isCatalogRel,
393 : xlrec->locator);
394 0 : }
395 :
396 : void
397 115546 : gist_redo(XLogReaderState *record)
398 : {
399 115546 : uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
400 : MemoryContext oldCxt;
401 :
402 : /*
403 : * GiST indexes do not require any conflict processing. NB: If we ever
404 : * implement a similar optimization we have in b-tree, and remove killed
405 : * tuples outside VACUUM, we'll need to handle that here.
406 : */
407 :
408 115546 : oldCxt = MemoryContextSwitchTo(opCtx);
409 115546 : switch (info)
410 : {
411 114650 : case XLOG_GIST_PAGE_UPDATE:
412 114650 : gistRedoPageUpdateRecord(record);
413 114650 : break;
414 0 : case XLOG_GIST_DELETE:
415 0 : gistRedoDeleteRecord(record);
416 0 : break;
417 0 : case XLOG_GIST_PAGE_REUSE:
418 0 : gistRedoPageReuse(record);
419 0 : break;
420 896 : case XLOG_GIST_PAGE_SPLIT:
421 896 : gistRedoPageSplitRecord(record);
422 896 : break;
423 0 : case XLOG_GIST_PAGE_DELETE:
424 0 : gistRedoPageDelete(record);
425 0 : break;
426 0 : case XLOG_GIST_ASSIGN_LSN:
427 : /* nop. See gistGetFakeLSN(). */
428 0 : break;
429 0 : default:
430 0 : elog(PANIC, "gist_redo: unknown op code %u", info);
431 : }
432 :
433 115546 : MemoryContextSwitchTo(oldCxt);
434 115546 : MemoryContextReset(opCtx);
435 115546 : }
436 :
437 : void
438 392 : gist_xlog_startup(void)
439 : {
440 392 : opCtx = createTempGistContext();
441 392 : }
442 :
443 : void
444 288 : gist_xlog_cleanup(void)
445 : {
446 288 : MemoryContextDelete(opCtx);
447 288 : }
448 :
449 : /*
450 : * Mask a GiST page before running consistency checks on it.
451 : */
452 : void
453 233892 : gist_mask(char *pagedata, BlockNumber blkno)
454 : {
455 233892 : Page page = (Page) pagedata;
456 :
457 233892 : mask_page_lsn_and_checksum(page);
458 :
459 233892 : mask_page_hint_bits(page);
460 233892 : mask_unused_space(page);
461 :
462 : /*
463 : * NSN is nothing but a special purpose LSN. Hence, mask it for the same
464 : * reason as mask_page_lsn_and_checksum.
465 : */
466 233892 : GistPageSetNSN(page, (uint64) MASK_MARKER);
467 :
468 : /*
469 : * We update the F_FOLLOW_RIGHT flag on the left child after writing the
470 : * WAL record. Hence, mask this flag. See gistplacetopage() for details.
471 : */
472 233892 : GistMarkFollowRight(page);
473 :
474 233892 : if (GistPageIsLeaf(page))
475 : {
476 : /*
477 : * In gist leaf pages, it is possible to modify the LP_FLAGS without
478 : * emitting any WAL record. Hence, mask the line pointer flags. See
479 : * gistkillitems() for details.
480 : */
481 148712 : mask_lp_flags(page);
482 : }
483 :
484 : /*
485 : * During gist redo, we never mark a page as garbage. Hence, mask it to
486 : * ignore any differences.
487 : */
488 233892 : GistClearPageHasGarbage(page);
489 233892 : }
490 :
491 : /*
492 : * Write WAL record of a page split.
493 : */
494 : XLogRecPtr
495 3472 : gistXLogSplit(bool page_is_leaf,
496 : SplitPageLayout *dist,
497 : BlockNumber origrlink, GistNSN orignsn,
498 : Buffer leftchildbuf, bool markfollowright)
499 : {
500 : gistxlogPageSplit xlrec;
501 : SplitPageLayout *ptr;
502 3472 : int npage = 0;
503 : XLogRecPtr recptr;
504 : int i;
505 :
506 10528 : for (ptr = dist; ptr; ptr = ptr->next)
507 7056 : npage++;
508 :
509 3472 : xlrec.origrlink = origrlink;
510 3472 : xlrec.orignsn = orignsn;
511 3472 : xlrec.origleaf = page_is_leaf;
512 3472 : xlrec.npage = (uint16) npage;
513 3472 : xlrec.markfollowright = markfollowright;
514 :
515 3472 : XLogBeginInsert();
516 :
517 : /*
518 : * Include a full page image of the child buf. (only necessary if a
519 : * checkpoint happened since the child page was split)
520 : */
521 3472 : if (BufferIsValid(leftchildbuf))
522 12 : XLogRegisterBuffer(0, leftchildbuf, REGBUF_STANDARD);
523 :
524 : /*
525 : * NOTE: We register a lot of data. The caller must've called
526 : * XLogEnsureRecordSpace() to prepare for that. We cannot do it here,
527 : * because we're already in a critical section. If you change the number
528 : * of buffer or data registrations here, make sure you modify the
529 : * XLogEnsureRecordSpace() calls accordingly!
530 : */
531 3472 : XLogRegisterData((char *) &xlrec, sizeof(gistxlogPageSplit));
532 :
533 3472 : i = 1;
534 10528 : for (ptr = dist; ptr; ptr = ptr->next)
535 : {
536 7056 : XLogRegisterBuffer(i, ptr->buffer, REGBUF_WILL_INIT);
537 7056 : XLogRegisterBufData(i, (char *) &(ptr->block.num), sizeof(int));
538 7056 : XLogRegisterBufData(i, (char *) ptr->list, ptr->lenlist);
539 7056 : i++;
540 : }
541 :
542 3472 : recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_SPLIT);
543 :
544 3472 : return recptr;
545 : }
546 :
547 : /*
548 : * Write XLOG record describing a page deletion. This also includes removal
549 : * of the downlink from the parent page.
550 : */
551 : XLogRecPtr
552 324 : gistXLogPageDelete(Buffer buffer, FullTransactionId xid,
553 : Buffer parentBuffer, OffsetNumber downlinkOffset)
554 : {
555 : gistxlogPageDelete xlrec;
556 : XLogRecPtr recptr;
557 :
558 324 : xlrec.deleteXid = xid;
559 324 : xlrec.downlinkOffset = downlinkOffset;
560 :
561 324 : XLogBeginInsert();
562 324 : XLogRegisterData((char *) &xlrec, SizeOfGistxlogPageDelete);
563 :
564 324 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
565 324 : XLogRegisterBuffer(1, parentBuffer, REGBUF_STANDARD);
566 :
567 324 : recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_DELETE);
568 :
569 324 : return recptr;
570 : }
571 :
572 : /*
573 : * Write an empty XLOG record to assign a distinct LSN.
574 : */
575 : XLogRecPtr
576 0 : gistXLogAssignLSN(void)
577 : {
578 0 : int dummy = 0;
579 :
580 : /*
581 : * Records other than XLOG_SWITCH must have content. We use an integer 0
582 : * to satisfy that restriction.
583 : */
584 0 : XLogBeginInsert();
585 0 : XLogSetRecordFlags(XLOG_MARK_UNIMPORTANT);
586 0 : XLogRegisterData((char *) &dummy, sizeof(dummy));
587 0 : return XLogInsert(RM_GIST_ID, XLOG_GIST_ASSIGN_LSN);
588 : }
589 :
590 : /*
591 : * Write XLOG record about reuse of a deleted page.
592 : */
593 : void
594 0 : gistXLogPageReuse(Relation rel, Relation heaprel,
595 : BlockNumber blkno, FullTransactionId deleteXid)
596 : {
597 : gistxlogPageReuse xlrec_reuse;
598 :
599 : /*
600 : * Note that we don't register the buffer with the record, because this
601 : * operation doesn't modify the page. This record only exists to provide a
602 : * conflict point for Hot Standby.
603 : */
604 :
605 : /* XLOG stuff */
606 0 : xlrec_reuse.isCatalogRel = RelationIsAccessibleInLogicalDecoding(heaprel);
607 0 : xlrec_reuse.locator = rel->rd_locator;
608 0 : xlrec_reuse.block = blkno;
609 0 : xlrec_reuse.snapshotConflictHorizon = deleteXid;
610 :
611 0 : XLogBeginInsert();
612 0 : XLogRegisterData((char *) &xlrec_reuse, SizeOfGistxlogPageReuse);
613 :
614 0 : XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_REUSE);
615 0 : }
616 :
617 : /*
618 : * Write XLOG record describing a page update. The update can include any
619 : * number of deletions and/or insertions of tuples on a single index page.
620 : *
621 : * If this update inserts a downlink for a split page, also record that
622 : * the F_FOLLOW_RIGHT flag on the child page is cleared and NSN set.
623 : *
624 : * Note that both the todelete array and the tuples are marked as belonging
625 : * to the target buffer; they need not be stored in XLOG if XLogInsert decides
626 : * to log the whole buffer contents instead.
627 : */
628 : XLogRecPtr
629 495532 : gistXLogUpdate(Buffer buffer,
630 : OffsetNumber *todelete, int ntodelete,
631 : IndexTuple *itup, int ituplen,
632 : Buffer leftchildbuf)
633 : {
634 : gistxlogPageUpdate xlrec;
635 : int i;
636 : XLogRecPtr recptr;
637 :
638 495532 : xlrec.ntodelete = ntodelete;
639 495532 : xlrec.ntoinsert = ituplen;
640 :
641 495532 : XLogBeginInsert();
642 495532 : XLogRegisterData((char *) &xlrec, sizeof(gistxlogPageUpdate));
643 :
644 495532 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
645 495532 : XLogRegisterBufData(0, (char *) todelete, sizeof(OffsetNumber) * ntodelete);
646 :
647 : /* new tuples */
648 993628 : for (i = 0; i < ituplen; i++)
649 498096 : XLogRegisterBufData(0, (char *) (itup[i]), IndexTupleSize(itup[i]));
650 :
651 : /*
652 : * Include a full page image of the child buf. (only necessary if a
653 : * checkpoint happened since the child page was split)
654 : */
655 495532 : if (BufferIsValid(leftchildbuf))
656 3348 : XLogRegisterBuffer(1, leftchildbuf, REGBUF_STANDARD);
657 :
658 495532 : recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_UPDATE);
659 :
660 495532 : return recptr;
661 : }
662 :
663 : /*
664 : * Write XLOG record describing a delete of leaf index tuples marked as DEAD
665 : * during new tuple insertion. One may think that this case is already covered
666 : * by gistXLogUpdate(). But deletion of index tuples might conflict with
667 : * standby queries and needs special handling.
668 : */
669 : XLogRecPtr
670 0 : gistXLogDelete(Buffer buffer, OffsetNumber *todelete, int ntodelete,
671 : TransactionId snapshotConflictHorizon, Relation heaprel)
672 : {
673 : gistxlogDelete xlrec;
674 : XLogRecPtr recptr;
675 :
676 0 : xlrec.isCatalogRel = RelationIsAccessibleInLogicalDecoding(heaprel);
677 0 : xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
678 0 : xlrec.ntodelete = ntodelete;
679 :
680 0 : XLogBeginInsert();
681 0 : XLogRegisterData((char *) &xlrec, SizeOfGistxlogDelete);
682 :
683 : /*
684 : * We need the target-offsets array whether or not we store the whole
685 : * buffer, to allow us to find the snapshotConflictHorizon on a standby
686 : * server.
687 : */
688 0 : XLogRegisterData((char *) todelete, ntodelete * sizeof(OffsetNumber));
689 :
690 0 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
691 :
692 0 : recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_DELETE);
693 :
694 0 : return recptr;
695 : }