Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * heapam_handler.c
4 : * heap table access method code
5 : *
6 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/access/heap/heapam_handler.c
12 : *
13 : *
14 : * NOTES
15 : * This file wires up the lower-level heapam.c et al. routines with the
16 : * tableam abstraction.
17 : *
18 : *-------------------------------------------------------------------------
19 : */
20 : #include "postgres.h"
21 :
22 : #include "access/genam.h"
23 : #include "access/heapam.h"
24 : #include "access/heaptoast.h"
25 : #include "access/multixact.h"
26 : #include "access/rewriteheap.h"
27 : #include "access/syncscan.h"
28 : #include "access/tableam.h"
29 : #include "access/tsmapi.h"
30 : #include "access/visibilitymap.h"
31 : #include "access/xact.h"
32 : #include "catalog/catalog.h"
33 : #include "catalog/index.h"
34 : #include "catalog/storage.h"
35 : #include "catalog/storage_xlog.h"
36 : #include "commands/progress.h"
37 : #include "executor/executor.h"
38 : #include "miscadmin.h"
39 : #include "pgstat.h"
40 : #include "storage/bufmgr.h"
41 : #include "storage/bufpage.h"
42 : #include "storage/lmgr.h"
43 : #include "storage/predicate.h"
44 : #include "storage/procarray.h"
45 : #include "storage/smgr.h"
46 : #include "utils/builtins.h"
47 : #include "utils/rel.h"
48 :
49 : static void reform_and_rewrite_tuple(HeapTuple tuple,
50 : Relation OldHeap, Relation NewHeap,
51 : Datum *values, bool *isnull, RewriteState rwstate);
52 :
53 : static bool SampleHeapTupleVisible(TableScanDesc scan, Buffer buffer,
54 : HeapTuple tuple,
55 : OffsetNumber tupoffset);
56 :
57 : static BlockNumber heapam_scan_get_blocks_done(HeapScanDesc hscan);
58 :
59 : static bool BitmapHeapScanNextBlock(TableScanDesc scan,
60 : bool *recheck,
61 : uint64 *lossy_pages, uint64 *exact_pages);
62 :
63 :
64 : /* ------------------------------------------------------------------------
65 : * Slot related callbacks for heap AM
66 : * ------------------------------------------------------------------------
67 : */
68 :
69 : static const TupleTableSlotOps *
70 28799836 : heapam_slot_callbacks(Relation relation)
71 : {
72 28799836 : return &TTSOpsBufferHeapTuple;
73 : }
74 :
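/*
 * Illustrative sketch, not part of heapam_handler.c itself: callers do not
 * invoke heapam_slot_callbacks() directly; they go through the tableam
 * wrappers, which dispatch via the relation's TableAmRoutine.  A minimal
 * usage pattern, assuming an already-opened heap Relation "rel" and the
 * headers included above:
 */
static void
example_slot_usage(Relation rel)
{
    /* Ask the AM which slot implementation it wants ... */
    const TupleTableSlotOps *ops = table_slot_callbacks(rel);

    /* ... or, more typically, let tableam build a suitable slot directly. */
    TupleTableSlot *slot = table_slot_create(rel, NULL);

    /* For the heap AM both paths yield TTSOpsBufferHeapTuple. */
    Assert(slot->tts_ops == ops);

    ExecDropSingleTupleTableSlot(slot);
}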
75 :
76 : /* ------------------------------------------------------------------------
77 : * Index Scan Callbacks for heap AM
78 : * ------------------------------------------------------------------------
79 : */
80 :
81 : static IndexFetchTableData *
82 27601730 : heapam_index_fetch_begin(Relation rel)
83 : {
84 27601730 : IndexFetchHeapData *hscan = palloc0_object(IndexFetchHeapData);
85 :
86 27601730 : hscan->xs_base.rel = rel;
87 27601730 : hscan->xs_cbuf = InvalidBuffer;
88 :
89 27601730 : return &hscan->xs_base;
90 : }
91 :
92 : static void
93 51716682 : heapam_index_fetch_reset(IndexFetchTableData *scan)
94 : {
95 51716682 : IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
96 :
97 51716682 : if (BufferIsValid(hscan->xs_cbuf))
98 : {
99 23464534 : ReleaseBuffer(hscan->xs_cbuf);
100 23464534 : hscan->xs_cbuf = InvalidBuffer;
101 : }
102 51716682 : }
103 :
104 : static void
105 27599932 : heapam_index_fetch_end(IndexFetchTableData *scan)
106 : {
107 27599932 : IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
108 :
109 27599932 : heapam_index_fetch_reset(scan);
110 :
111 27599932 : pfree(hscan);
112 27599932 : }
113 :
114 : static bool
115 39585228 : heapam_index_fetch_tuple(struct IndexFetchTableData *scan,
116 : ItemPointer tid,
117 : Snapshot snapshot,
118 : TupleTableSlot *slot,
119 : bool *call_again, bool *all_dead)
120 : {
121 39585228 : IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
122 39585228 : BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
123 : bool got_heap_tuple;
124 :
125 : Assert(TTS_IS_BUFFERTUPLE(slot));
126 :
127 : /* We can skip the buffer-switching logic if we're in mid-HOT chain. */
128 39585228 : if (!*call_again)
129 : {
130 : /* Switch to correct buffer if we don't have it already */
131 39411066 : Buffer prev_buf = hscan->xs_cbuf;
132 :
133 39411066 : hscan->xs_cbuf = ReleaseAndReadBuffer(hscan->xs_cbuf,
134 : hscan->xs_base.rel,
135 : ItemPointerGetBlockNumber(tid));
136 :
137 : /*
138 : * Prune page, but only if we weren't already on this page
139 : */
140 39411060 : if (prev_buf != hscan->xs_cbuf)
141 27315410 : heap_page_prune_opt(hscan->xs_base.rel, hscan->xs_cbuf);
142 : }
143 :
144 : /* Obtain share-lock on the buffer so we can examine visibility */
145 39585222 : LockBuffer(hscan->xs_cbuf, BUFFER_LOCK_SHARE);
146 39585222 : got_heap_tuple = heap_hot_search_buffer(tid,
147 : hscan->xs_base.rel,
148 : hscan->xs_cbuf,
149 : snapshot,
150 : &bslot->base.tupdata,
151 : all_dead,
152 39585222 : !*call_again);
153 39585218 : bslot->base.tupdata.t_self = *tid;
154 39585218 : LockBuffer(hscan->xs_cbuf, BUFFER_LOCK_UNLOCK);
155 :
156 39585218 : if (got_heap_tuple)
157 : {
158 : /*
159 : * Only in a non-MVCC snapshot can more than one member of the HOT
160 : * chain be visible.
161 : */
162 26924378 : *call_again = !IsMVCCSnapshot(snapshot);
163 :
164 26924378 : slot->tts_tableOid = RelationGetRelid(scan->rel);
165 26924378 : ExecStoreBufferHeapTuple(&bslot->base.tupdata, slot, hscan->xs_cbuf);
166 : }
167 : else
168 : {
169 : /* We've reached the end of the HOT chain. */
170 12660840 : *call_again = false;
171 : }
172 :
173 39585218 : return got_heap_tuple;
174 : }
175 :
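/*
 * Illustrative sketch, not part of heapam_handler.c itself: the three
 * index-fetch callbacks above implement a begin/fetch/end protocol that the
 * executor drives through the tableam wrappers.  A minimal loop over one
 * TID, assuming an opened heap Relation "rel", a valid snapshot, and a slot
 * obtained from table_slot_create():
 */
static void
example_index_fetch(Relation rel, ItemPointer tid, Snapshot snapshot,
                    TupleTableSlot *slot)
{
    IndexFetchTableData *fetch = table_index_fetch_begin(rel);
    bool        call_again = false;
    bool        all_dead = false;

    /*
     * With a non-MVCC snapshot more than one member of a HOT chain can be
     * visible, in which case call_again is set and we loop.
     */
    do
    {
        if (table_index_fetch_tuple(fetch, tid, snapshot, slot,
                                    &call_again, &all_dead))
        {
            /* slot now holds a visible tuple version, pinned in its buffer */
        }
    } while (call_again);

    table_index_fetch_end(fetch);
}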
176 :
177 : /* ------------------------------------------------------------------------
178 : * Callbacks for non-modifying operations on individual tuples for heap AM
179 : * ------------------------------------------------------------------------
180 : */
181 :
182 : static bool
183 357752 : heapam_fetch_row_version(Relation relation,
184 : ItemPointer tid,
185 : Snapshot snapshot,
186 : TupleTableSlot *slot)
187 : {
188 357752 : BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
189 : Buffer buffer;
190 :
191 : Assert(TTS_IS_BUFFERTUPLE(slot));
192 :
193 357752 : bslot->base.tupdata.t_self = *tid;
194 357752 : if (heap_fetch(relation, snapshot, &bslot->base.tupdata, &buffer, false))
195 : {
196 : /* store in slot, transferring existing pin */
197 357024 : ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata, slot, buffer);
198 357024 : slot->tts_tableOid = RelationGetRelid(relation);
199 :
200 357024 : return true;
201 : }
202 :
203 712 : return false;
204 : }
205 :
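/*
 * Illustrative sketch, not part of heapam_handler.c itself: executor code
 * reaches heapam_fetch_row_version() through the tableam wrapper
 * table_tuple_fetch_row_version().  Minimal usage, assuming an opened
 * Relation "rel" and a buffer-capable slot from table_slot_create():
 */
static bool
example_fetch_row_version(Relation rel, ItemPointer tid, Snapshot snapshot,
                          TupleTableSlot *slot)
{
    /*
     * On success the tuple is stored in the slot with its buffer pin
     * transferred; the caller just uses the slot and eventually clears it.
     */
    return table_tuple_fetch_row_version(rel, tid, snapshot, slot);
}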
206 : static bool
207 766 : heapam_tuple_tid_valid(TableScanDesc scan, ItemPointer tid)
208 : {
209 766 : HeapScanDesc hscan = (HeapScanDesc) scan;
210 :
211 1514 : return ItemPointerIsValid(tid) &&
212 748 : ItemPointerGetBlockNumber(tid) < hscan->rs_nblocks;
213 : }
214 :
215 : static bool
216 255248 : heapam_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot,
217 : Snapshot snapshot)
218 : {
219 255248 : BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
220 : bool res;
221 :
222 : Assert(TTS_IS_BUFFERTUPLE(slot));
223 : Assert(BufferIsValid(bslot->buffer));
224 :
225 : /*
226 : * We need buffer pin and lock to call HeapTupleSatisfiesVisibility.
227 : * Caller should be holding pin, but not lock.
228 : */
229 255248 : LockBuffer(bslot->buffer, BUFFER_LOCK_SHARE);
230 255248 : res = HeapTupleSatisfiesVisibility(bslot->base.tuple, snapshot,
231 : bslot->buffer);
232 255248 : LockBuffer(bslot->buffer, BUFFER_LOCK_UNLOCK);
233 :
234 255248 : return res;
235 : }
236 :
237 :
238 : /* ----------------------------------------------------------------------------
239 : * Functions for manipulations of physical tuples for heap AM.
240 : * ----------------------------------------------------------------------------
241 : */
242 :
243 : static void
244 14843262 : heapam_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid,
245 : int options, BulkInsertState bistate)
246 : {
247 14843262 : bool shouldFree = true;
248 14843262 : HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
249 :
250 : /* Update the tuple with table oid */
251 14843262 : slot->tts_tableOid = RelationGetRelid(relation);
252 14843262 : tuple->t_tableOid = slot->tts_tableOid;
253 :
254 : /* Perform the insertion, and copy the resulting ItemPointer */
255 14843262 : heap_insert(relation, tuple, cid, options, bistate);
256 14843224 : ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
257 :
258 14843224 : if (shouldFree)
259 2967996 : pfree(tuple);
260 14843224 : }
261 :
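/*
 * Illustrative sketch, not part of heapam_handler.c itself: DML code paths
 * reach heapam_tuple_insert() through table_tuple_insert().  A minimal
 * single-row insert, assuming an opened Relation "rel", a filled slot, and
 * no bulk-insert state:
 */
static void
example_tuple_insert(Relation rel, TupleTableSlot *slot)
{
    /* options = 0 requests a plain, WAL-logged insert */
    table_tuple_insert(rel, slot, GetCurrentCommandId(true), 0, NULL);

    /* On return slot->tts_tid carries the TID assigned to the new tuple. */
}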
262 : static void
263 4196 : heapam_tuple_insert_speculative(Relation relation, TupleTableSlot *slot,
264 : CommandId cid, int options,
265 : BulkInsertState bistate, uint32 specToken)
266 : {
267 4196 : bool shouldFree = true;
268 4196 : HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
269 :
270 : /* Update the tuple with table oid */
271 4196 : slot->tts_tableOid = RelationGetRelid(relation);
272 4196 : tuple->t_tableOid = slot->tts_tableOid;
273 :
274 4196 : HeapTupleHeaderSetSpeculativeToken(tuple->t_data, specToken);
275 4196 : options |= HEAP_INSERT_SPECULATIVE;
276 :
277 : /* Perform the insertion, and copy the resulting ItemPointer */
278 4196 : heap_insert(relation, tuple, cid, options, bistate);
279 4196 : ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
280 :
281 4196 : if (shouldFree)
282 82 : pfree(tuple);
283 4196 : }
284 :
285 : static void
286 4190 : heapam_tuple_complete_speculative(Relation relation, TupleTableSlot *slot,
287 : uint32 specToken, bool succeeded)
288 : {
289 4190 : bool shouldFree = true;
290 4190 : HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
291 :
292 : /* adjust the tuple's state accordingly */
293 4190 : if (succeeded)
294 4168 : heap_finish_speculative(relation, &slot->tts_tid);
295 : else
296 22 : heap_abort_speculative(relation, &slot->tts_tid);
297 :
298 4190 : if (shouldFree)
299 82 : pfree(tuple);
300 4190 : }
301 :
302 : static TM_Result
303 1731952 : heapam_tuple_delete(Relation relation, ItemPointer tid, CommandId cid,
304 : Snapshot snapshot, Snapshot crosscheck, bool wait,
305 : TM_FailureData *tmfd, bool changingPart)
306 : {
307 : /*
308 : * Currently, deletion of index tuples is handled at VACUUM time. If the
309 : * storage itself were to clean up dead tuples on its own, that would
310 : * also be the time to remove the corresponding index tuples.
311 : */
312 1731952 : return heap_delete(relation, tid, cid, crosscheck, wait, tmfd, changingPart);
313 : }
314 :
315 :
316 : static TM_Result
317 389472 : heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
318 : CommandId cid, Snapshot snapshot, Snapshot crosscheck,
319 : bool wait, TM_FailureData *tmfd,
320 : LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
321 : {
322 389472 : bool shouldFree = true;
323 389472 : HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
324 : TM_Result result;
325 :
326 : /* Update the tuple with table oid */
327 389472 : slot->tts_tableOid = RelationGetRelid(relation);
328 389472 : tuple->t_tableOid = slot->tts_tableOid;
329 :
330 389472 : result = heap_update(relation, otid, tuple, cid, crosscheck, wait,
331 : tmfd, lockmode, update_indexes);
332 389448 : ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
333 :
334 : /*
335 : * Decide whether new index entries are needed for the tuple
336 : *
337 : * Note: heap_update returns the tid (location) of the new tuple in the
338 : * t_self field.
339 : *
340 : * If the update is not HOT, we must update all indexes. If the update is
341 : * HOT, it could be that we updated summarized columns, so we either
342 : * update only summarized indexes, or none at all.
343 : */
344 389448 : if (result != TM_Ok)
345 : {
346 : Assert(*update_indexes == TU_None);
347 316 : *update_indexes = TU_None;
348 : }
349 389132 : else if (!HeapTupleIsHeapOnly(tuple))
350 : Assert(*update_indexes == TU_All);
351 : else
352 : Assert((*update_indexes == TU_Summarizing) ||
353 : (*update_indexes == TU_None));
354 :
355 389448 : if (shouldFree)
356 63888 : pfree(tuple);
357 :
358 389448 : return result;
359 : }
360 :
361 : static TM_Result
362 316452 : heapam_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot,
363 : TupleTableSlot *slot, CommandId cid, LockTupleMode mode,
364 : LockWaitPolicy wait_policy, uint8 flags,
365 : TM_FailureData *tmfd)
366 : {
367 316452 : BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
368 : TM_Result result;
369 : Buffer buffer;
370 316452 : HeapTuple tuple = &bslot->base.tupdata;
371 : bool follow_updates;
372 :
373 316452 : follow_updates = (flags & TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS) != 0;
374 316452 : tmfd->traversed = false;
375 :
376 : Assert(TTS_IS_BUFFERTUPLE(slot));
377 :
378 316792 : tuple_lock_retry:
379 316792 : tuple->t_self = *tid;
380 316792 : result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
381 : follow_updates, &buffer, tmfd);
382 :
383 316774 : if (result == TM_Updated &&
384 420 : (flags & TUPLE_LOCK_FLAG_FIND_LAST_VERSION))
385 : {
386 : /* Should not encounter speculative tuple on recheck */
387 : Assert(!HeapTupleHeaderIsSpeculative(tuple->t_data));
388 :
389 386 : ReleaseBuffer(buffer);
390 :
391 386 : if (!ItemPointerEquals(&tmfd->ctid, &tuple->t_self))
392 : {
393 : SnapshotData SnapshotDirty;
394 : TransactionId priorXmax;
395 :
396 : /* it was updated, so look at the updated version */
397 386 : *tid = tmfd->ctid;
398 : /* updated row should have xmin matching this xmax */
399 386 : priorXmax = tmfd->xmax;
400 :
401 : /* signal that a tuple later in the chain is getting locked */
402 386 : tmfd->traversed = true;
403 :
404 : /*
405 : * fetch target tuple
406 : *
407 : * Loop here to deal with updated or busy tuples
408 : */
409 386 : InitDirtySnapshot(SnapshotDirty);
410 : for (;;)
411 : {
412 448 : if (ItemPointerIndicatesMovedPartitions(tid))
413 22 : ereport(ERROR,
414 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
415 : errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
416 :
417 426 : tuple->t_self = *tid;
418 426 : if (heap_fetch(relation, &SnapshotDirty, tuple, &buffer, true))
419 : {
420 : /*
421 : * If xmin isn't what we're expecting, the slot must have
422 : * been recycled and reused for an unrelated tuple. This
423 : * implies that the latest version of the row was deleted,
424 : * so we need do nothing. (Should be safe to examine xmin
425 : * without getting buffer's content lock. We assume
426 : * reading a TransactionId to be atomic, and Xmin never
427 : * changes in an existing tuple, except to invalid or
428 : * frozen, and neither of those can match priorXmax.)
429 : */
430 358 : if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple->t_data),
431 : priorXmax))
432 : {
433 0 : ReleaseBuffer(buffer);
434 22 : return TM_Deleted;
435 : }
436 :
437 : /* otherwise xmin should not be dirty... */
438 358 : if (TransactionIdIsValid(SnapshotDirty.xmin))
439 0 : ereport(ERROR,
440 : (errcode(ERRCODE_DATA_CORRUPTED),
441 : errmsg_internal("t_xmin %u is uncommitted in tuple (%u,%u) to be updated in table \"%s\"",
442 : SnapshotDirty.xmin,
443 : ItemPointerGetBlockNumber(&tuple->t_self),
444 : ItemPointerGetOffsetNumber(&tuple->t_self),
445 : RelationGetRelationName(relation))));
446 :
447 : /*
448 : * If tuple is being updated by other transaction then we
449 : * have to wait for its commit/abort, or die trying.
450 : */
451 358 : if (TransactionIdIsValid(SnapshotDirty.xmax))
452 : {
453 4 : ReleaseBuffer(buffer);
454 4 : switch (wait_policy)
455 : {
456 0 : case LockWaitBlock:
457 0 : XactLockTableWait(SnapshotDirty.xmax,
458 0 : relation, &tuple->t_self,
459 : XLTW_FetchUpdated);
460 0 : break;
461 2 : case LockWaitSkip:
462 2 : if (!ConditionalXactLockTableWait(SnapshotDirty.xmax, false))
463 : /* skip instead of waiting */
464 2 : return TM_WouldBlock;
465 0 : break;
466 2 : case LockWaitError:
467 2 : if (!ConditionalXactLockTableWait(SnapshotDirty.xmax, log_lock_failures))
468 2 : ereport(ERROR,
469 : (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
470 : errmsg("could not obtain lock on row in relation \"%s\"",
471 : RelationGetRelationName(relation))));
472 0 : break;
473 : }
474 0 : continue; /* loop back to repeat heap_fetch */
475 : }
476 :
477 : /*
478 : * If tuple was inserted by our own transaction, we have
479 : * to check cmin against cid: cmin >= current CID means
480 : * our command cannot see the tuple, so we should ignore
481 : * it. Otherwise heap_lock_tuple() will throw an error,
482 : * and so would any later attempt to update or delete the
483 : * tuple. (We need not check cmax because
484 : * HeapTupleSatisfiesDirty will consider a tuple deleted
485 : * by our transaction dead, regardless of cmax.) We just
486 : * checked that priorXmax == xmin, so we can test that
487 : * variable instead of doing HeapTupleHeaderGetXmin again.
488 : */
489 368 : if (TransactionIdIsCurrentTransactionId(priorXmax) &&
490 14 : HeapTupleHeaderGetCmin(tuple->t_data) >= cid)
491 : {
492 14 : tmfd->xmax = priorXmax;
493 :
494 : /*
495 : * Cmin is the problematic value, so store that. See
496 : * above.
497 : */
498 14 : tmfd->cmax = HeapTupleHeaderGetCmin(tuple->t_data);
499 14 : ReleaseBuffer(buffer);
500 14 : return TM_SelfModified;
501 : }
502 :
503 : /*
504 : * This is a live tuple, so try to lock it again.
505 : */
506 340 : ReleaseBuffer(buffer);
507 340 : goto tuple_lock_retry;
508 : }
509 :
510 : /*
511 : * If the referenced slot was actually empty, the latest
512 : * version of the row must have been deleted, so we need do
513 : * nothing.
514 : */
515 68 : if (tuple->t_data == NULL)
516 : {
517 : Assert(!BufferIsValid(buffer));
518 0 : return TM_Deleted;
519 : }
520 :
521 : /*
522 : * As above, if xmin isn't what we're expecting, do nothing.
523 : */
524 68 : if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple->t_data),
525 : priorXmax))
526 : {
527 0 : ReleaseBuffer(buffer);
528 0 : return TM_Deleted;
529 : }
530 :
531 : /*
532 : * If we get here, the tuple was found but failed
533 : * SnapshotDirty. Assuming the xmin is either a committed xact
534 : * or our own xact (as it certainly should be if we're trying
535 : * to modify the tuple), this must mean that the row was
536 : * updated or deleted by either a committed xact or our own
537 : * xact. If it was deleted, we can ignore it; if it was
538 : * updated then chain up to the next version and repeat the
539 : * whole process.
540 : *
541 : * As above, it should be safe to examine xmax and t_ctid
542 : * without the buffer content lock, because they can't be
543 : * changing. We'd better hold a buffer pin though.
544 : */
545 68 : if (ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
546 : {
547 : /* deleted, so forget about it */
548 6 : ReleaseBuffer(buffer);
549 6 : return TM_Deleted;
550 : }
551 :
552 : /* updated, so look at the updated row */
553 62 : *tid = tuple->t_data->t_ctid;
554 : /* updated row should have xmin matching this xmax */
555 62 : priorXmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
556 62 : ReleaseBuffer(buffer);
557 : /* loop back to fetch next in chain */
558 : }
559 : }
560 : else
561 : {
562 : /* tuple was deleted, so give up */
563 0 : return TM_Deleted;
564 : }
565 : }
566 :
567 316388 : slot->tts_tableOid = RelationGetRelid(relation);
568 316388 : tuple->t_tableOid = slot->tts_tableOid;
569 :
570 : /* store in slot, transferring existing pin */
571 316388 : ExecStorePinnedBufferHeapTuple(tuple, slot, buffer);
572 :
573 316388 : return result;
574 : }
575 :
576 :
577 : /* ------------------------------------------------------------------------
578 : * DDL related callbacks for heap AM.
579 : * ------------------------------------------------------------------------
580 : */
581 :
582 : static void
583 67560 : heapam_relation_set_new_filelocator(Relation rel,
584 : const RelFileLocator *newrlocator,
585 : char persistence,
586 : TransactionId *freezeXid,
587 : MultiXactId *minmulti)
588 : {
589 : SMgrRelation srel;
590 :
591 : /*
592 : * Initialize to the minimum XID that could put tuples in the table. We
593 : * know that no xacts older than RecentXmin are still running, so that
594 : * will do.
595 : */
596 67560 : *freezeXid = RecentXmin;
597 :
598 : /*
599 : * Similarly, initialize the minimum Multixact to the first value that
600 : * could possibly be stored in tuples in the table. Running transactions
601 : * could reuse values from their local cache, so we are careful to
602 : * consider all currently running multis.
603 : *
604 : * XXX this could be refined further, but is it worth the hassle?
605 : */
606 67560 : *minmulti = GetOldestMultiXactId();
607 :
608 67560 : srel = RelationCreateStorage(*newrlocator, persistence, true);
609 :
610 : /*
611 : * If required, set up an init fork for an unlogged table so that it can
612 : * be correctly reinitialized on restart.
613 : */
614 67560 : if (persistence == RELPERSISTENCE_UNLOGGED)
615 : {
616 : Assert(rel->rd_rel->relkind == RELKIND_RELATION ||
617 : rel->rd_rel->relkind == RELKIND_TOASTVALUE);
618 270 : smgrcreate(srel, INIT_FORKNUM, false);
619 270 : log_smgrcreate(newrlocator, INIT_FORKNUM);
620 : }
621 :
622 67560 : smgrclose(srel);
623 67560 : }
624 :
625 : static void
626 624 : heapam_relation_nontransactional_truncate(Relation rel)
627 : {
628 624 : RelationTruncate(rel, 0);
629 624 : }
630 :
631 : static void
632 98 : heapam_relation_copy_data(Relation rel, const RelFileLocator *newrlocator)
633 : {
634 : SMgrRelation dstrel;
635 :
636 : /*
637 : * Since we copy the file directly without looking at the shared buffers,
638 : * we'd better first flush out any pages of the source relation that are
639 : * in shared buffers. We assume no new changes will be made while we are
640 : * holding exclusive lock on the rel.
641 : */
642 98 : FlushRelationBuffers(rel);
643 :
644 : /*
645 : * Create and copy all forks of the relation, and schedule unlinking of
646 : * old physical files.
647 : *
648 : * NOTE: any conflict in relfilenumber value will be caught in
649 : * RelationCreateStorage().
650 : */
651 98 : dstrel = RelationCreateStorage(*newrlocator, rel->rd_rel->relpersistence, true);
652 :
653 : /* copy main fork */
654 98 : RelationCopyStorage(RelationGetSmgr(rel), dstrel, MAIN_FORKNUM,
655 98 : rel->rd_rel->relpersistence);
656 :
657 : /* copy those extra forks that exist */
658 98 : for (ForkNumber forkNum = MAIN_FORKNUM + 1;
659 392 : forkNum <= MAX_FORKNUM; forkNum++)
660 : {
661 294 : if (smgrexists(RelationGetSmgr(rel), forkNum))
662 : {
663 18 : smgrcreate(dstrel, forkNum, false);
664 :
665 : /*
666 : * WAL log creation if the relation is persistent, or this is the
667 : * init fork of an unlogged relation.
668 : */
669 18 : if (RelationIsPermanent(rel) ||
670 6 : (rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
671 : forkNum == INIT_FORKNUM))
672 12 : log_smgrcreate(newrlocator, forkNum);
673 18 : RelationCopyStorage(RelationGetSmgr(rel), dstrel, forkNum,
674 18 : rel->rd_rel->relpersistence);
675 : }
676 : }
677 :
678 :
679 : /* drop old relation, and close new one */
680 98 : RelationDropStorage(rel);
681 98 : smgrclose(dstrel);
682 98 : }
683 :
684 : static void
685 568 : heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
686 : Relation OldIndex, bool use_sort,
687 : TransactionId OldestXmin,
688 : TransactionId *xid_cutoff,
689 : MultiXactId *multi_cutoff,
690 : double *num_tuples,
691 : double *tups_vacuumed,
692 : double *tups_recently_dead)
693 : {
694 : RewriteState rwstate;
695 : IndexScanDesc indexScan;
696 : TableScanDesc tableScan;
697 : HeapScanDesc heapScan;
698 : bool is_system_catalog;
699 : Tuplesortstate *tuplesort;
700 568 : TupleDesc oldTupDesc = RelationGetDescr(OldHeap);
701 568 : TupleDesc newTupDesc = RelationGetDescr(NewHeap);
702 : TupleTableSlot *slot;
703 : int natts;
704 : Datum *values;
705 : bool *isnull;
706 : BufferHeapTupleTableSlot *hslot;
707 568 : BlockNumber prev_cblock = InvalidBlockNumber;
708 :
709 : /* Remember if it's a system catalog */
710 568 : is_system_catalog = IsSystemRelation(OldHeap);
711 :
712 : /*
713 : * Valid smgr_targblock implies something already wrote to the relation.
714 : * This may be harmless, but this function hasn't planned for it.
715 : */
716 : Assert(RelationGetTargetBlock(NewHeap) == InvalidBlockNumber);
717 :
718 : /* Preallocate values/isnull arrays */
719 568 : natts = newTupDesc->natts;
720 568 : values = palloc_array(Datum, natts);
721 568 : isnull = palloc_array(bool, natts);
722 :
723 : /* Initialize the rewrite operation */
724 568 : rwstate = begin_heap_rewrite(OldHeap, NewHeap, OldestXmin, *xid_cutoff,
725 : *multi_cutoff);
726 :
727 :
728 : /* Set up sorting if wanted */
729 568 : if (use_sort)
730 110 : tuplesort = tuplesort_begin_cluster(oldTupDesc, OldIndex,
731 : maintenance_work_mem,
732 : NULL, TUPLESORT_NONE);
733 : else
734 458 : tuplesort = NULL;
735 :
736 : /*
737 : * Prepare to scan the OldHeap. To ensure we see recently-dead tuples
738 : * that still need to be copied, we scan with SnapshotAny and use
739 : * HeapTupleSatisfiesVacuum for the visibility test.
740 : */
741 568 : if (OldIndex != NULL && !use_sort)
742 78 : {
743 78 : const int ci_index[] = {
744 : PROGRESS_CLUSTER_PHASE,
745 : PROGRESS_CLUSTER_INDEX_RELID
746 : };
747 : int64 ci_val[2];
748 :
749 : /* Set phase and OIDOldIndex to columns */
750 78 : ci_val[0] = PROGRESS_CLUSTER_PHASE_INDEX_SCAN_HEAP;
751 78 : ci_val[1] = RelationGetRelid(OldIndex);
752 78 : pgstat_progress_update_multi_param(2, ci_index, ci_val);
753 :
754 78 : tableScan = NULL;
755 78 : heapScan = NULL;
756 78 : indexScan = index_beginscan(OldHeap, OldIndex, SnapshotAny, NULL, 0, 0);
757 78 : index_rescan(indexScan, NULL, 0, NULL, 0);
758 : }
759 : else
760 : {
761 : /* In scan-and-sort mode and also VACUUM FULL, set phase */
762 490 : pgstat_progress_update_param(PROGRESS_CLUSTER_PHASE,
763 : PROGRESS_CLUSTER_PHASE_SEQ_SCAN_HEAP);
764 :
765 490 : tableScan = table_beginscan(OldHeap, SnapshotAny, 0, (ScanKey) NULL);
766 490 : heapScan = (HeapScanDesc) tableScan;
767 490 : indexScan = NULL;
768 :
769 : /* Set total heap blocks */
770 490 : pgstat_progress_update_param(PROGRESS_CLUSTER_TOTAL_HEAP_BLKS,
771 490 : heapScan->rs_nblocks);
772 : }
773 :
774 568 : slot = table_slot_create(OldHeap, NULL);
775 568 : hslot = (BufferHeapTupleTableSlot *) slot;
776 :
777 : /*
778 : * Scan through the OldHeap, either in OldIndex order or sequentially;
779 : * copy each tuple into the NewHeap, or transiently to the tuplesort
780 : * module. Note that we don't bother sorting dead tuples (they won't get
781 : * to the new table anyway).
782 : */
783 : for (;;)
784 761470 : {
785 : HeapTuple tuple;
786 : Buffer buf;
787 : bool isdead;
788 :
789 762038 : CHECK_FOR_INTERRUPTS();
790 :
791 762038 : if (indexScan != NULL)
792 : {
793 186 : if (!index_getnext_slot(indexScan, ForwardScanDirection, slot))
794 78 : break;
795 :
796 : /* Since we used no scan keys, should never need to recheck */
797 108 : if (indexScan->xs_recheck)
798 0 : elog(ERROR, "CLUSTER does not support lossy index conditions");
799 : }
800 : else
801 : {
802 761852 : if (!table_scan_getnextslot(tableScan, ForwardScanDirection, slot))
803 : {
804 : /*
805 : * If the last pages of the scan were empty, we would go to
806 : * the next phase while heap_blks_scanned != heap_blks_total.
807 : * Instead, to ensure that heap_blks_scanned is equivalent to
808 : * heap_blks_total after the table scan phase, this parameter
809 : * is manually updated to the correct value when the table
810 : * scan finishes.
811 : */
812 490 : pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_BLKS_SCANNED,
813 490 : heapScan->rs_nblocks);
814 490 : break;
815 : }
816 :
817 : /*
818 : * In scan-and-sort mode and also VACUUM FULL, set heap blocks
819 : * scanned
820 : *
821 : * Note that heapScan may start at an offset and wrap around, i.e.
822 : * rs_startblock may be >0, and rs_cblock may end with a number
823 : * below rs_startblock. To prevent showing this wraparound to the
824 : * user, we offset rs_cblock by rs_startblock (modulo rs_nblocks).
825 : */
826 761362 : if (prev_cblock != heapScan->rs_cblock)
827 : {
828 11184 : pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_BLKS_SCANNED,
829 11184 : (heapScan->rs_cblock +
830 11184 : heapScan->rs_nblocks -
831 11184 : heapScan->rs_startblock
832 11184 : ) % heapScan->rs_nblocks + 1);
833 11184 : prev_cblock = heapScan->rs_cblock;
834 : }
835 : }
836 :
837 761470 : tuple = ExecFetchSlotHeapTuple(slot, false, NULL);
838 761470 : buf = hslot->buffer;
839 :
840 : /*
841 : * To be able to guarantee that we can set the hint bit, acquire an
842 : * exclusive lock on the old buffer. We need the hint bits, set in
843 : * heapam_relation_copy_for_cluster() -> HeapTupleSatisfiesVacuum(),
844 : * to be set, as otherwise reform_and_rewrite_tuple() ->
845 : * rewrite_heap_tuple() will get confused. Specifically,
846 : * rewrite_heap_tuple() checks for HEAP_XMAX_INVALID in the old tuple
847 : * to determine whether to check the old-to-new mapping hash table.
848 : *
849 : * It'd be better if we somehow could avoid setting hint bits on the
850 : * old page. One reason to use VACUUM FULL is a very bloated table -
851 : * rewriting most of the old table during VACUUM FULL doesn't exactly
852 : * help...
853 : */
854 761470 : LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
855 :
856 761470 : switch (HeapTupleSatisfiesVacuum(tuple, OldestXmin, buf))
857 : {
858 33146 : case HEAPTUPLE_DEAD:
859 : /* Definitely dead */
860 33146 : isdead = true;
861 33146 : break;
862 33826 : case HEAPTUPLE_RECENTLY_DEAD:
863 33826 : *tups_recently_dead += 1;
864 : /* fall through */
865 728114 : case HEAPTUPLE_LIVE:
866 : /* Live or recently dead, must copy it */
867 728114 : isdead = false;
868 728114 : break;
869 160 : case HEAPTUPLE_INSERT_IN_PROGRESS:
870 :
871 : /*
872 : * Since we hold exclusive lock on the relation, normally the
873 : * only way to see this is if it was inserted earlier in our
874 : * own transaction. However, it can happen in system
875 : * catalogs, since we tend to release write lock before commit
876 : * there. Give a warning if neither case applies; but in any
877 : * case we had better copy it.
878 : */
879 160 : if (!is_system_catalog &&
880 22 : !TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple->t_data)))
881 0 : elog(WARNING, "concurrent insert in progress within table \"%s\"",
882 : RelationGetRelationName(OldHeap));
883 : /* treat as live */
884 160 : isdead = false;
885 160 : break;
886 50 : case HEAPTUPLE_DELETE_IN_PROGRESS:
887 :
888 : /*
889 : * Similar situation to INSERT_IN_PROGRESS case.
890 : */
891 50 : if (!is_system_catalog &&
892 30 : !TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(tuple->t_data)))
893 0 : elog(WARNING, "concurrent delete in progress within table \"%s\"",
894 : RelationGetRelationName(OldHeap));
895 : /* treat as recently dead */
896 50 : *tups_recently_dead += 1;
897 50 : isdead = false;
898 50 : break;
899 0 : default:
900 0 : elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
901 : isdead = false; /* keep compiler quiet */
902 : break;
903 : }
904 :
905 761470 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
906 :
907 761470 : if (isdead)
908 : {
909 33146 : *tups_vacuumed += 1;
910 : /* heap rewrite module still needs to see it... */
911 33146 : if (rewrite_heap_dead_tuple(rwstate, tuple))
912 : {
913 : /* A previous recently-dead tuple is now known dead */
914 0 : *tups_vacuumed += 1;
915 0 : *tups_recently_dead -= 1;
916 : }
917 33146 : continue;
918 : }
919 :
920 728324 : *num_tuples += 1;
921 728324 : if (tuplesort != NULL)
922 : {
923 547414 : tuplesort_putheaptuple(tuplesort, tuple);
924 :
925 : /*
926 : * In scan-and-sort mode, report increase in number of tuples
927 : * scanned
928 : */
929 547414 : pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_TUPLES_SCANNED,
930 547414 : *num_tuples);
931 : }
932 : else
933 : {
934 180910 : const int ct_index[] = {
935 : PROGRESS_CLUSTER_HEAP_TUPLES_SCANNED,
936 : PROGRESS_CLUSTER_HEAP_TUPLES_WRITTEN
937 : };
938 : int64 ct_val[2];
939 :
940 180910 : reform_and_rewrite_tuple(tuple, OldHeap, NewHeap,
941 : values, isnull, rwstate);
942 :
943 : /*
944 : * In indexscan mode and also VACUUM FULL, report increase in
945 : * number of tuples scanned and written
946 : */
947 180910 : ct_val[0] = *num_tuples;
948 180910 : ct_val[1] = *num_tuples;
949 180910 : pgstat_progress_update_multi_param(2, ct_index, ct_val);
950 : }
951 : }
952 :
953 568 : if (indexScan != NULL)
954 78 : index_endscan(indexScan);
955 568 : if (tableScan != NULL)
956 490 : table_endscan(tableScan);
957 568 : if (slot)
958 568 : ExecDropSingleTupleTableSlot(slot);
959 :
960 : /*
961 : * In scan-and-sort mode, complete the sort, then read out all live tuples
962 : * from the tuplesort and write them to the new relation.
963 : */
964 568 : if (tuplesort != NULL)
965 : {
966 110 : double n_tuples = 0;
967 :
968 : /* Report that we are now sorting tuples */
969 110 : pgstat_progress_update_param(PROGRESS_CLUSTER_PHASE,
970 : PROGRESS_CLUSTER_PHASE_SORT_TUPLES);
971 :
972 110 : tuplesort_performsort(tuplesort);
973 :
974 : /* Report that we are now writing new heap */
975 110 : pgstat_progress_update_param(PROGRESS_CLUSTER_PHASE,
976 : PROGRESS_CLUSTER_PHASE_WRITE_NEW_HEAP);
977 :
978 : for (;;)
979 547414 : {
980 : HeapTuple tuple;
981 :
982 547524 : CHECK_FOR_INTERRUPTS();
983 :
984 547524 : tuple = tuplesort_getheaptuple(tuplesort, true);
985 547524 : if (tuple == NULL)
986 110 : break;
987 :
988 547414 : n_tuples += 1;
989 547414 : reform_and_rewrite_tuple(tuple,
990 : OldHeap, NewHeap,
991 : values, isnull,
992 : rwstate);
993 : /* Report n_tuples */
994 547414 : pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_TUPLES_WRITTEN,
995 : n_tuples);
996 : }
997 :
998 110 : tuplesort_end(tuplesort);
999 : }
1000 :
1001 : /* Write out any remaining tuples, and fsync if needed */
1002 568 : end_heap_rewrite(rwstate);
1003 :
1004 : /* Clean up */
1005 568 : pfree(values);
1006 568 : pfree(isnull);
1007 568 : }
1008 :
1009 : /*
1010 : * Prepare to analyze the next block in the read stream. Returns false if
1011 : * the stream is exhausted and true otherwise. The scan must have been started
1012 : * with SO_TYPE_ANALYZE option.
1013 : *
1014 : * This routine holds a buffer pin and lock on the heap page. They are held
1015 : * until heapam_scan_analyze_next_tuple() returns false. That is until all the
1016 : * items of the heap page are analyzed.
1017 : */
1018 : static bool
1019 168714 : heapam_scan_analyze_next_block(TableScanDesc scan, ReadStream *stream)
1020 : {
1021 168714 : HeapScanDesc hscan = (HeapScanDesc) scan;
1022 :
1023 : /*
1024 : * We must maintain a pin on the target page's buffer to ensure that
1025 : * concurrent activity - e.g. HOT pruning - doesn't delete tuples out from
1026 : * under us. It comes from the stream already pinned. We also choose to
1027 : * hold sharelock on the buffer throughout --- we could release and
1028 : * re-acquire sharelock for each tuple, but since we aren't doing much
1029 : * work per tuple, the extra lock traffic is probably better avoided.
1030 : */
1031 168714 : hscan->rs_cbuf = read_stream_next_buffer(stream, NULL);
1032 168714 : if (!BufferIsValid(hscan->rs_cbuf))
1033 18046 : return false;
1034 :
1035 150668 : LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
1036 :
1037 150668 : hscan->rs_cblock = BufferGetBlockNumber(hscan->rs_cbuf);
1038 150668 : hscan->rs_cindex = FirstOffsetNumber;
1039 150668 : return true;
1040 : }
1041 :
1042 : static bool
1043 11476112 : heapam_scan_analyze_next_tuple(TableScanDesc scan, TransactionId OldestXmin,
1044 : double *liverows, double *deadrows,
1045 : TupleTableSlot *slot)
1046 : {
1047 11476112 : HeapScanDesc hscan = (HeapScanDesc) scan;
1048 : Page targpage;
1049 : OffsetNumber maxoffset;
1050 : BufferHeapTupleTableSlot *hslot;
1051 :
1052 : Assert(TTS_IS_BUFFERTUPLE(slot));
1053 :
1054 11476112 : hslot = (BufferHeapTupleTableSlot *) slot;
1055 11476112 : targpage = BufferGetPage(hscan->rs_cbuf);
1056 11476112 : maxoffset = PageGetMaxOffsetNumber(targpage);
1057 :
1058 : /* Inner loop over all tuples on the selected page */
1059 12097740 : for (; hscan->rs_cindex <= maxoffset; hscan->rs_cindex++)
1060 : {
1061 : ItemId itemid;
1062 11947072 : HeapTuple targtuple = &hslot->base.tupdata;
1063 11947072 : bool sample_it = false;
1064 :
1065 11947072 : itemid = PageGetItemId(targpage, hscan->rs_cindex);
1066 :
1067 : /*
1068 : * We ignore unused and redirect line pointers. DEAD line pointers
1069 : * should be counted as dead, because we need vacuum to run to get rid
1070 : * of them. Note that this rule agrees with the way that
1071 : * heap_page_prune_and_freeze() counts things.
1072 : */
1073 11947072 : if (!ItemIdIsNormal(itemid))
1074 : {
1075 427840 : if (ItemIdIsDead(itemid))
1076 139032 : *deadrows += 1;
1077 427840 : continue;
1078 : }
1079 :
1080 11519232 : ItemPointerSet(&targtuple->t_self, hscan->rs_cblock, hscan->rs_cindex);
1081 :
1082 11519232 : targtuple->t_tableOid = RelationGetRelid(scan->rs_rd);
1083 11519232 : targtuple->t_data = (HeapTupleHeader) PageGetItem(targpage, itemid);
1084 11519232 : targtuple->t_len = ItemIdGetLength(itemid);
1085 :
1086 11519232 : switch (HeapTupleSatisfiesVacuum(targtuple, OldestXmin,
1087 : hscan->rs_cbuf))
1088 : {
1089 10865220 : case HEAPTUPLE_LIVE:
1090 10865220 : sample_it = true;
1091 10865220 : *liverows += 1;
1092 10865220 : break;
1093 :
1094 191636 : case HEAPTUPLE_DEAD:
1095 : case HEAPTUPLE_RECENTLY_DEAD:
1096 : /* Count dead and recently-dead rows */
1097 191636 : *deadrows += 1;
1098 191636 : break;
1099 :
1100 346726 : case HEAPTUPLE_INSERT_IN_PROGRESS:
1101 :
1102 : /*
1103 : * Insert-in-progress rows are not counted. We assume that
1104 : * when the inserting transaction commits or aborts, it will
1105 : * send a stats message to increment the proper count. This
1106 : * works right only if that transaction ends after we finish
1107 : * analyzing the table; if things happen in the other order,
1108 : * its stats update will be overwritten by ours. However, the
1109 : * error will be large only if the other transaction runs long
1110 : * enough to insert many tuples, so assuming it will finish
1111 : * after us is the safer option.
1112 : *
1113 : * A special case is that the inserting transaction might be
1114 : * our own. In this case we should count and sample the row,
1115 : * to accommodate users who load a table and analyze it in one
1116 : * transaction. (pgstat_report_analyze has to adjust the
1117 : * numbers we report to the cumulative stats system to make
1118 : * this come out right.)
1119 : */
1120 346726 : if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(targtuple->t_data)))
1121 : {
1122 346290 : sample_it = true;
1123 346290 : *liverows += 1;
1124 : }
1125 346726 : break;
1126 :
1127 115650 : case HEAPTUPLE_DELETE_IN_PROGRESS:
1128 :
1129 : /*
1130 : * We count and sample delete-in-progress rows the same as
1131 : * live ones, so that the stats counters come out right if the
1132 : * deleting transaction commits after us, per the same
1133 : * reasoning given above.
1134 : *
1135 : * If the delete was done by our own transaction, however, we
1136 : * must count the row as dead to make pgstat_report_analyze's
1137 : * stats adjustments come out right. (Note: this works out
1138 : * properly when the row was both inserted and deleted in our
1139 : * xact.)
1140 : *
1141 : * The net effect of these choices is that we act as though an
1142 : * IN_PROGRESS transaction hasn't happened yet, except if it
1143 : * is our own transaction, which we assume has happened.
1144 : *
1145 : * This approach ensures that we behave sanely if we see both
1146 : * the pre-image and post-image rows for a row being updated
1147 : * by a concurrent transaction: we will sample the pre-image
1148 : * but not the post-image. We also get sane results if the
1149 : * concurrent transaction never commits.
1150 : */
1151 115650 : if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(targtuple->t_data)))
1152 1716 : *deadrows += 1;
1153 : else
1154 : {
1155 113934 : sample_it = true;
1156 113934 : *liverows += 1;
1157 : }
1158 115650 : break;
1159 :
1160 0 : default:
1161 0 : elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1162 : break;
1163 : }
1164 :
1165 11519232 : if (sample_it)
1166 : {
1167 11325444 : ExecStoreBufferHeapTuple(targtuple, slot, hscan->rs_cbuf);
1168 11325444 : hscan->rs_cindex++;
1169 :
1170 : /* note that we leave the buffer locked here! */
1171 11325444 : return true;
1172 : }
1173 : }
1174 :
1175 : /* Now release the lock and pin on the page */
1176 150668 : UnlockReleaseBuffer(hscan->rs_cbuf);
1177 150668 : hscan->rs_cbuf = InvalidBuffer;
1178 :
1179 : /* also prevent old slot contents from having pin on page */
1180 150668 : ExecClearTuple(slot);
1181 :
1182 150668 : return false;
1183 : }
1184 :
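/*
 * Illustrative sketch, not part of heapam_handler.c itself: ANALYZE drives
 * the two callbacks above as a nested loop, one pinned-and-locked block at a
 * time, then one sampled tuple per inner call.  The sketch assumes the
 * tableam wrappers table_scan_analyze_next_block()/_next_tuple() with the
 * same argument lists as the callbacks, a scan opened with SO_TYPE_ANALYZE,
 * its associated ReadStream, and a slot from table_slot_create():
 */
static void
example_analyze_scan(TableScanDesc scan, ReadStream *stream,
                     TransactionId OldestXmin, TupleTableSlot *slot)
{
    double      liverows = 0;
    double      deadrows = 0;

    while (table_scan_analyze_next_block(scan, stream))
    {
        /* The buffer stays pinned and share-locked until this returns false. */
        while (table_scan_analyze_next_tuple(scan, OldestXmin,
                                             &liverows, &deadrows, slot))
        {
            /* sample or skip the tuple currently stored in slot */
        }
    }
}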
1185 : static double
1186 58270 : heapam_index_build_range_scan(Relation heapRelation,
1187 : Relation indexRelation,
1188 : IndexInfo *indexInfo,
1189 : bool allow_sync,
1190 : bool anyvisible,
1191 : bool progress,
1192 : BlockNumber start_blockno,
1193 : BlockNumber numblocks,
1194 : IndexBuildCallback callback,
1195 : void *callback_state,
1196 : TableScanDesc scan)
1197 : {
1198 : HeapScanDesc hscan;
1199 : bool is_system_catalog;
1200 : bool checking_uniqueness;
1201 : HeapTuple heapTuple;
1202 : Datum values[INDEX_MAX_KEYS];
1203 : bool isnull[INDEX_MAX_KEYS];
1204 : double reltuples;
1205 : ExprState *predicate;
1206 : TupleTableSlot *slot;
1207 : EState *estate;
1208 : ExprContext *econtext;
1209 : Snapshot snapshot;
1210 58270 : bool need_unregister_snapshot = false;
1211 : TransactionId OldestXmin;
1212 58270 : BlockNumber previous_blkno = InvalidBlockNumber;
1213 58270 : BlockNumber root_blkno = InvalidBlockNumber;
1214 : OffsetNumber root_offsets[MaxHeapTuplesPerPage];
1215 :
1216 : /*
1217 : * sanity checks
1218 : */
1219 : Assert(OidIsValid(indexRelation->rd_rel->relam));
1220 :
1221 : /* Remember if it's a system catalog */
1222 58270 : is_system_catalog = IsSystemRelation(heapRelation);
1223 :
1224 : /* See whether we're verifying uniqueness/exclusion properties */
1225 73522 : checking_uniqueness = (indexInfo->ii_Unique ||
1226 15252 : indexInfo->ii_ExclusionOps != NULL);
1227 :
1228 : /*
1229 : * "Any visible" mode is not compatible with uniqueness checks; make sure
1230 : * only one of those is requested.
1231 : */
1232 : Assert(!(anyvisible && checking_uniqueness));
1233 :
1234 : /*
1235 : * Need an EState for evaluation of index expressions and partial-index
1236 : * predicates. Also a slot to hold the current tuple.
1237 : */
1238 58270 : estate = CreateExecutorState();
1239 58270 : econtext = GetPerTupleExprContext(estate);
1240 58270 : slot = table_slot_create(heapRelation, NULL);
1241 :
1242 : /* Arrange for econtext's scan tuple to be the tuple under test */
1243 58270 : econtext->ecxt_scantuple = slot;
1244 :
1245 : /* Set up execution state for predicate, if any. */
1246 58270 : predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
1247 :
1248 : /*
1249 : * Prepare for scan of the base relation. In a normal index build, we use
1250 : * SnapshotAny because we must retrieve all tuples and do our own time
1251 : * qual checks (because we have to index RECENTLY_DEAD tuples). In a
1252 : * concurrent build, or during bootstrap, we take a regular MVCC snapshot
1253 : * and index whatever's live according to that.
1254 : */
1255 58270 : OldestXmin = InvalidTransactionId;
1256 :
1257 : /* okay to ignore lazy VACUUMs here */
1258 58270 : if (!IsBootstrapProcessingMode() && !indexInfo->ii_Concurrent)
1259 41144 : OldestXmin = GetOldestNonRemovableTransactionId(heapRelation);
1260 :
1261 58270 : if (!scan)
1262 : {
1263 : /*
1264 : * Serial index build.
1265 : *
1266 : * Must begin our own heap scan in this case. We may also need to
1267 : * register a snapshot whose lifetime is under our direct control.
1268 : */
1269 57796 : if (!TransactionIdIsValid(OldestXmin))
1270 : {
1271 16980 : snapshot = RegisterSnapshot(GetTransactionSnapshot());
1272 16980 : need_unregister_snapshot = true;
1273 : }
1274 : else
1275 40816 : snapshot = SnapshotAny;
1276 :
1277 57796 : scan = table_beginscan_strat(heapRelation, /* relation */
1278 : snapshot, /* snapshot */
1279 : 0, /* number of keys */
1280 : NULL, /* scan key */
1281 : true, /* buffer access strategy OK */
1282 : allow_sync); /* syncscan OK? */
1283 : }
1284 : else
1285 : {
1286 : /*
1287 : * Parallel index build.
1288 : *
1289 : * Parallel case never registers/unregisters own snapshot. Snapshot
1290 : * is taken from parallel heap scan, and is SnapshotAny or an MVCC
1291 : * snapshot, based on same criteria as serial case.
1292 : */
1293 : Assert(!IsBootstrapProcessingMode());
1294 : Assert(allow_sync);
1295 474 : snapshot = scan->rs_snapshot;
1296 : }
1297 :
1298 58270 : hscan = (HeapScanDesc) scan;
1299 :
1300 : /*
1301 : * Must have called GetOldestNonRemovableTransactionId() if using
1302 : * SnapshotAny. Shouldn't have for an MVCC snapshot. (It's especially
1303 : * worth checking this for parallel builds, since ambuild routines that
1304 : * support parallel builds must work these details out for themselves.)
1305 : */
1306 : Assert(snapshot == SnapshotAny || IsMVCCSnapshot(snapshot));
1307 : Assert(snapshot == SnapshotAny ? TransactionIdIsValid(OldestXmin) :
1308 : !TransactionIdIsValid(OldestXmin));
1309 : Assert(snapshot == SnapshotAny || !anyvisible);
1310 :
1311 : /* Publish number of blocks to scan */
1312 58270 : if (progress)
1313 : {
1314 : BlockNumber nblocks;
1315 :
1316 55022 : if (hscan->rs_base.rs_parallel != NULL)
1317 : {
1318 : ParallelBlockTableScanDesc pbscan;
1319 :
1320 174 : pbscan = (ParallelBlockTableScanDesc) hscan->rs_base.rs_parallel;
1321 174 : nblocks = pbscan->phs_nblocks;
1322 : }
1323 : else
1324 54848 : nblocks = hscan->rs_nblocks;
1325 :
1326 55022 : pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_TOTAL,
1327 : nblocks);
1328 : }
1329 :
1330 : /* set our scan endpoints */
1331 58270 : if (!allow_sync)
1332 3728 : heap_setscanlimits(scan, start_blockno, numblocks);
1333 : else
1334 : {
1335 : /* syncscan can only be requested on whole relation */
1336 : Assert(start_blockno == 0);
1337 : Assert(numblocks == InvalidBlockNumber);
1338 : }
1339 :
1340 58270 : reltuples = 0;
1341 :
1342 : /*
1343 : * Scan all tuples in the base relation.
1344 : */
1345 17747470 : while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
1346 : {
1347 : bool tupleIsAlive;
1348 :
1349 17689212 : CHECK_FOR_INTERRUPTS();
1350 :
1351 : /* Report scan progress, if asked to. */
1352 17689212 : if (progress)
1353 : {
1354 15022672 : BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
1355 :
1356 15022672 : if (blocks_done != previous_blkno)
1357 : {
1358 194678 : pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_DONE,
1359 : blocks_done);
1360 194678 : previous_blkno = blocks_done;
1361 : }
1362 : }
1363 :
1364 : /*
1365 : * When dealing with a HOT-chain of updated tuples, we want to index
1366 : * the values of the live tuple (if any), but index it under the TID
1367 : * of the chain's root tuple. This approach is necessary to preserve
1368 : * the HOT-chain structure in the heap. So we need to be able to find
1369 : * the root item offset for every tuple that's in a HOT-chain. When
1370 : * first reaching a new page of the relation, call
1371 : * heap_get_root_tuples() to build a map of root item offsets on the
1372 : * page.
1373 : *
1374 : * It might look unsafe to use this information across buffer
1375 : * lock/unlock. However, we hold ShareLock on the table so no
1376 : * ordinary insert/update/delete should occur; and we hold pin on the
1377 : * buffer continuously while visiting the page, so no pruning
1378 : * operation can occur either.
1379 : *
1380 : * In cases with only ShareUpdateExclusiveLock on the table, it's
1381 : * possible for some HOT tuples to appear that we didn't know about
1382 : * when we first read the page. To handle that case, we re-obtain the
1383 : * list of root offsets when a HOT tuple points to a root item that we
1384 : * don't know about.
1385 : *
1386 : * Also, although our opinions about tuple liveness could change while
1387 : * we scan the page (due to concurrent transaction commits/aborts),
1388 : * the chain root locations won't, so this info doesn't need to be
1389 : * rebuilt after waiting for another transaction.
1390 : *
1391 : * Note the implied assumption that there is no more than one live
1392 : * tuple per HOT-chain --- else we could create more than one index
1393 : * entry pointing to the same root tuple.
1394 : */
1395 17689212 : if (hscan->rs_cblock != root_blkno)
1396 : {
1397 221140 : Page page = BufferGetPage(hscan->rs_cbuf);
1398 :
1399 221140 : LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
1400 221140 : heap_get_root_tuples(page, root_offsets);
1401 221140 : LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1402 :
1403 221140 : root_blkno = hscan->rs_cblock;
1404 : }
1405 :
1406 17689212 : if (snapshot == SnapshotAny)
1407 : {
1408 : /* do our own time qual check */
1409 : bool indexIt;
1410 : TransactionId xwait;
1411 :
1412 14005930 : recheck:
1413 :
1414 : /*
1415 : * We could possibly get away with not locking the buffer here,
1416 : * since caller should hold ShareLock on the relation, but let's
1417 : * be conservative about it. (This remark is still correct even
1418 : * with HOT-pruning: our pin on the buffer prevents pruning.)
1419 : */
1420 14005930 : LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
1421 :
1422 : /*
1423 : * The criteria for counting a tuple as live in this block need to
1424 : * match what analyze.c's heapam_scan_analyze_next_tuple() does,
1425 : * otherwise CREATE INDEX and ANALYZE may produce wildly different
1426 : * reltuples values, e.g. when there are many recently-dead
1427 : * tuples.
1428 : */
1429 14005930 : switch (HeapTupleSatisfiesVacuum(heapTuple, OldestXmin,
1430 : hscan->rs_cbuf))
1431 : {
1432 32146 : case HEAPTUPLE_DEAD:
1433 : /* Definitely dead, we can ignore it */
1434 32146 : indexIt = false;
1435 32146 : tupleIsAlive = false;
1436 32146 : break;
1437 10224414 : case HEAPTUPLE_LIVE:
1438 : /* Normal case, index and unique-check it */
1439 10224414 : indexIt = true;
1440 10224414 : tupleIsAlive = true;
1441 : /* Count it as live, too */
1442 10224414 : reltuples += 1;
1443 10224414 : break;
1444 202152 : case HEAPTUPLE_RECENTLY_DEAD:
1445 :
1446 : /*
1447 : * If tuple is recently deleted then we must index it
1448 : * anyway to preserve MVCC semantics. (Pre-existing
1449 : * transactions could try to use the index after we finish
1450 : * building it, and may need to see such tuples.)
1451 : *
1452 : * However, if it was HOT-updated then we must only index
1453 : * the live tuple at the end of the HOT-chain. Since this
1454 : * breaks semantics for pre-existing snapshots, mark the
1455 : * index as unusable for them.
1456 : *
1457 : * We don't count recently-dead tuples in reltuples, even
1458 : * if we index them; see heapam_scan_analyze_next_tuple().
1459 : */
1460 202152 : if (HeapTupleIsHotUpdated(heapTuple))
1461 : {
1462 226 : indexIt = false;
1463 : /* mark the index as unsafe for old snapshots */
1464 226 : indexInfo->ii_BrokenHotChain = true;
1465 : }
1466 : else
1467 201926 : indexIt = true;
1468 : /* In any case, exclude the tuple from unique-checking */
1469 202152 : tupleIsAlive = false;
1470 202152 : break;
1471 3547128 : case HEAPTUPLE_INSERT_IN_PROGRESS:
1472 :
1473 : /*
1474 : * In "anyvisible" mode, this tuple is visible and we
1475 : * don't need any further checks.
1476 : */
1477 3547128 : if (anyvisible)
1478 : {
1479 61472 : indexIt = true;
1480 61472 : tupleIsAlive = true;
1481 61472 : reltuples += 1;
1482 61472 : break;
1483 : }
1484 :
1485 : /*
1486 : * Since caller should hold ShareLock or better, normally
1487 : * the only way to see this is if it was inserted earlier
1488 : * in our own transaction. However, it can happen in
1489 : * system catalogs, since we tend to release write lock
1490 : * before commit there. Give a warning if neither case
1491 : * applies.
1492 : */
1493 3485656 : xwait = HeapTupleHeaderGetXmin(heapTuple->t_data);
1494 3485656 : if (!TransactionIdIsCurrentTransactionId(xwait))
1495 : {
1496 30 : if (!is_system_catalog)
1497 0 : elog(WARNING, "concurrent insert in progress within table \"%s\"",
1498 : RelationGetRelationName(heapRelation));
1499 :
1500 : /*
1501 : * If we are performing uniqueness checks, indexing
1502 : * such a tuple could lead to a bogus uniqueness
1503 : * failure. In that case we wait for the inserting
1504 : * transaction to finish and check again.
1505 : */
1506 30 : if (checking_uniqueness)
1507 : {
1508 : /*
1509 : * Must drop the lock on the buffer before we wait
1510 : */
1511 0 : LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1512 0 : XactLockTableWait(xwait, heapRelation,
1513 0 : &heapTuple->t_self,
1514 : XLTW_InsertIndexUnique);
1515 0 : CHECK_FOR_INTERRUPTS();
1516 0 : goto recheck;
1517 : }
1518 : }
1519 : else
1520 : {
1521 : /*
1522 : * For consistency with
1523 : * heapam_scan_analyze_next_tuple(), count
1524 : * HEAPTUPLE_INSERT_IN_PROGRESS tuples as live only
1525 : * when inserted by our own transaction.
1526 : */
1527 3485626 : reltuples += 1;
1528 : }
1529 :
1530 : /*
1531 : * We must index such tuples, since if the index build
1532 : * commits then they're good.
1533 : */
1534 3485656 : indexIt = true;
1535 3485656 : tupleIsAlive = true;
1536 3485656 : break;
1537 90 : case HEAPTUPLE_DELETE_IN_PROGRESS:
1538 :
1539 : /*
1540 : * As with INSERT_IN_PROGRESS case, this is unexpected
1541 : * unless it's our own deletion or a system catalog; but
1542 : * in anyvisible mode, this tuple is visible.
1543 : */
1544 90 : if (anyvisible)
1545 : {
1546 0 : indexIt = true;
1547 0 : tupleIsAlive = false;
1548 0 : reltuples += 1;
1549 0 : break;
1550 : }
1551 :
1552 90 : xwait = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1553 90 : if (!TransactionIdIsCurrentTransactionId(xwait))
1554 : {
1555 12 : if (!is_system_catalog)
1556 0 : elog(WARNING, "concurrent delete in progress within table \"%s\"",
1557 : RelationGetRelationName(heapRelation));
1558 :
1559 : /*
1560 : * If we are performing uniqueness checks, assuming
1561 : * the tuple is dead could lead to missing a
1562 : * uniqueness violation. In that case we wait for the
1563 : * deleting transaction to finish and check again.
1564 : *
1565 : * Also, if it's a HOT-updated tuple, we should not
1566 : * index it but rather the live tuple at the end of
1567 : * the HOT-chain. However, the deleting transaction
1568 : * could abort, possibly leaving this tuple as live
1569 : * after all, in which case it has to be indexed. The
1570 : * only way to know what to do is to wait for the
1571 : * deleting transaction to finish and check again.
1572 : */
1573 24 : if (checking_uniqueness ||
1574 12 : HeapTupleIsHotUpdated(heapTuple))
1575 : {
1576 : /*
1577 : * Must drop the lock on the buffer before we wait
1578 : */
1579 0 : LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1580 0 : XactLockTableWait(xwait, heapRelation,
1581 0 : &heapTuple->t_self,
1582 : XLTW_InsertIndexUnique);
1583 0 : CHECK_FOR_INTERRUPTS();
1584 0 : goto recheck;
1585 : }
1586 :
1587 : /*
1588 : * Otherwise index it but don't check for uniqueness,
1589 : * the same as a RECENTLY_DEAD tuple.
1590 : */
1591 12 : indexIt = true;
1592 :
1593 : /*
1594 : * Count HEAPTUPLE_DELETE_IN_PROGRESS tuples as live,
1595 : * if they were not deleted by the current
1596 : * transaction. That's what
1597 : * heapam_scan_analyze_next_tuple() does, and we want
1598 : * the behavior to be consistent.
1599 : */
1600 12 : reltuples += 1;
1601 : }
1602 78 : else if (HeapTupleIsHotUpdated(heapTuple))
1603 : {
1604 : /*
1605 : * It's a HOT-updated tuple deleted by our own xact.
1606 : * We can assume the deletion will commit (else the
1607 : * index contents don't matter), so treat the same as
1608 : * RECENTLY_DEAD HOT-updated tuples.
1609 : */
1610 0 : indexIt = false;
1611 : /* mark the index as unsafe for old snapshots */
1612 0 : indexInfo->ii_BrokenHotChain = true;
1613 : }
1614 : else
1615 : {
1616 : /*
1617 : * It's a regular tuple deleted by our own xact. Index
1618 : * it, but don't check for uniqueness nor count in
1619 : * reltuples, the same as a RECENTLY_DEAD tuple.
1620 : */
1621 78 : indexIt = true;
1622 : }
1623 : /* In any case, exclude the tuple from unique-checking */
1624 90 : tupleIsAlive = false;
1625 90 : break;
1626 0 : default:
1627 0 : elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1628 : indexIt = tupleIsAlive = false; /* keep compiler quiet */
1629 : break;
1630 : }
1631 :
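     : /* Done with the visibility check; release the buffer content lock (the pin is kept). */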
1632 14005930 : LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1633 :
1634 14005930 : if (!indexIt)
1635 32372 : continue;
1636 : }
1637 : else
1638 : {
1639 : /* heap_getnext did the time qual check */
1640 3683282 : tupleIsAlive = true;
1641 3683282 : reltuples += 1;
1642 : }
1643 :
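     : /* Reset per-tuple memory so expression evaluation doesn't leak across tuples. */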
1644 17656840 : MemoryContextReset(econtext->ecxt_per_tuple_memory);
1645 :
1646 : /* Set up for predicate or expression evaluation */
1647 17656840 : ExecStoreBufferHeapTuple(heapTuple, slot, hscan->rs_cbuf);
1648 :
1649 : /*
1650 : * In a partial index, discard tuples that don't satisfy the
1651 : * predicate.
1652 : */
1653 17656840 : if (predicate != NULL)
1654 : {
1655 204570 : if (!ExecQual(predicate, econtext))
1656 109674 : continue;
1657 : }
1658 :
1659 : /*
1660 : * For the current heap tuple, extract all the attributes we use in
1661 : * this index, and note which are null. This also performs evaluation
1662 : * of any expressions needed.
1663 : */
1664 17547166 : FormIndexDatum(indexInfo,
1665 : slot,
1666 : estate,
1667 : values,
1668 : isnull);
1669 :
1670 : /*
1671 : * You'd think we should go ahead and build the index tuple here, but
1672 : * some index AMs want to do further processing on the data first. So
1673 : * pass the values[] and isnull[] arrays, instead.
1674 : */
1675 :
1676 17547154 : if (HeapTupleIsHeapOnly(heapTuple))
1677 : {
1678 : /*
1679 : * For a heap-only tuple, pretend its TID is that of the root. See
1680 : * src/backend/access/heap/README.HOT for discussion.
1681 : */
1682 : ItemPointerData tid;
1683 : OffsetNumber offnum;
1684 :
1685 8682 : offnum = ItemPointerGetOffsetNumber(&heapTuple->t_self);
1686 :
1687 : /*
1688 : * If a HOT tuple points to a root that we don't know about,
1689 : * obtain root items afresh. If that still fails, report it as
1690 : * corruption.
1691 : */
1692 8682 : if (root_offsets[offnum - 1] == InvalidOffsetNumber)
1693 : {
1694 0 : Page page = BufferGetPage(hscan->rs_cbuf);
1695 :
1696 0 : LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
1697 0 : heap_get_root_tuples(page, root_offsets);
1698 0 : LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1699 : }
1700 :
1701 8682 : if (!OffsetNumberIsValid(root_offsets[offnum - 1]))
1702 0 : ereport(ERROR,
1703 : (errcode(ERRCODE_DATA_CORRUPTED),
1704 : errmsg_internal("failed to find parent tuple for heap-only tuple at (%u,%u) in table \"%s\"",
1705 : ItemPointerGetBlockNumber(&heapTuple->t_self),
1706 : offnum,
1707 : RelationGetRelationName(heapRelation))));
1708 :
1709 8682 : ItemPointerSet(&tid, ItemPointerGetBlockNumber(&heapTuple->t_self),
1710 8682 : root_offsets[offnum - 1]);
1711 :
1712 : /* Call the AM's callback routine to process the tuple */
1713 8682 : callback(indexRelation, &tid, values, isnull, tupleIsAlive,
1714 : callback_state);
1715 : }
1716 : else
1717 : {
1718 : /* Call the AM's callback routine to process the tuple */
1719 17538472 : callback(indexRelation, &heapTuple->t_self, values, isnull,
1720 : tupleIsAlive, callback_state);
1721 : }
1722 : }
1723 :
1724 : /* Report scan progress one last time. */
1725 58258 : if (progress)
1726 : {
1727 : BlockNumber blks_done;
1728 :
1729 55010 : if (hscan->rs_base.rs_parallel != NULL)
1730 : {
1731 : ParallelBlockTableScanDesc pbscan;
1732 :
1733 174 : pbscan = (ParallelBlockTableScanDesc) hscan->rs_base.rs_parallel;
1734 174 : blks_done = pbscan->phs_nblocks;
1735 : }
1736 : else
1737 54836 : blks_done = hscan->rs_nblocks;
1738 :
1739 55010 : pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_DONE,
1740 : blks_done);
1741 : }
1742 :
1743 58258 : table_endscan(scan);
1744 :
1745 : /* we can now forget our snapshot, if set and registered by us */
1746 58258 : if (need_unregister_snapshot)
1747 16974 : UnregisterSnapshot(snapshot);
1748 :
1749 58258 : ExecDropSingleTupleTableSlot(slot);
1750 :
1751 58258 : FreeExecutorState(estate);
1752 :
1753 : /* These may have been pointing to the now-gone estate */
1754 58258 : indexInfo->ii_ExpressionsState = NIL;
1755 58258 : indexInfo->ii_PredicateState = NULL;
1756 :
1757 58258 : return reltuples;
1758 : }
1759 :
1760 : static void
1761 738 : heapam_index_validate_scan(Relation heapRelation,
1762 : Relation indexRelation,
1763 : IndexInfo *indexInfo,
1764 : Snapshot snapshot,
1765 : ValidateIndexState *state)
1766 : {
1767 : TableScanDesc scan;
1768 : HeapScanDesc hscan;
1769 : HeapTuple heapTuple;
1770 : Datum values[INDEX_MAX_KEYS];
1771 : bool isnull[INDEX_MAX_KEYS];
1772 : ExprState *predicate;
1773 : TupleTableSlot *slot;
1774 : EState *estate;
1775 : ExprContext *econtext;
1776 738 : BlockNumber root_blkno = InvalidBlockNumber;
1777 : OffsetNumber root_offsets[MaxHeapTuplesPerPage];
1778 : bool in_index[MaxHeapTuplesPerPage];
1779 738 : BlockNumber previous_blkno = InvalidBlockNumber;
1780 :
1781 : /* state variables for the merge */
1782 738 : ItemPointer indexcursor = NULL;
1783 : ItemPointerData decoded;
1784 738 : bool tuplesort_empty = false;
1785 :
1786 : /*
1787 : * sanity checks
1788 : */
1789 : Assert(OidIsValid(indexRelation->rd_rel->relam));
1790 :
1791 : /*
1792 : * Need an EState for evaluation of index expressions and partial-index
1793 : * predicates. Also a slot to hold the current tuple.
1794 : */
1795 738 : estate = CreateExecutorState();
1796 738 : econtext = GetPerTupleExprContext(estate);
1797 738 : slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation),
1798 : &TTSOpsHeapTuple);
1799 :
1800 : /* Arrange for econtext's scan tuple to be the tuple under test */
1801 738 : econtext->ecxt_scantuple = slot;
1802 :
1803 : /* Set up execution state for predicate, if any. */
1804 738 : predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
1805 :
1806 : /*
1807 : * Prepare for scan of the base relation. We need just those tuples
1808 : * satisfying the passed-in reference snapshot. We must disable syncscan
1809 : * here, because it's critical that we read from block zero forward to
1810 : * match the sorted TIDs.
1811 : */
1812 738 : scan = table_beginscan_strat(heapRelation, /* relation */
1813 : snapshot, /* snapshot */
1814 : 0, /* number of keys */
1815 : NULL, /* scan key */
1816 : true, /* buffer access strategy OK */
1817 : false); /* syncscan not OK */
1818 738 : hscan = (HeapScanDesc) scan;
1819 :
1820 738 : pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_TOTAL,
1821 738 : hscan->rs_nblocks);
1822 :
1823 : /*
1824 : * Scan all tuples matching the snapshot.
1825 : */
1826 250458 : while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
1827 : {
1828 249720 : ItemPointer heapcursor = &heapTuple->t_self;
1829 : ItemPointerData rootTuple;
1830 : OffsetNumber root_offnum;
1831 :
1832 249720 : CHECK_FOR_INTERRUPTS();
1833 :
1834 249720 : state->htups += 1;
1835 :
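     : /* Report progress whenever the scan advances to a new heap block. */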
1836 249720 : if ((previous_blkno == InvalidBlockNumber) ||
1837 249276 : (hscan->rs_cblock != previous_blkno))
1838 : {
1839 4722 : pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_DONE,
1840 4722 : hscan->rs_cblock);
1841 4722 : previous_blkno = hscan->rs_cblock;
1842 : }
1843 :
1844 : /*
1845 : * As commented in table_index_build_scan, we should index heap-only
1846 : * tuples under the TIDs of their root tuples; so when we advance onto
1847 : * a new heap page, build a map of root item offsets on the page.
1848 : *
1849 : * This complicates merging against the tuplesort output: we will
1850 : * visit the live tuples in order by their offsets, but the root
1851 : * offsets that we need to compare against the index contents might be
1852 : * ordered differently. So we might have to "look back" within the
1853 : * tuplesort output, but only within the current page. We handle that
1854 : * by keeping a bool array in_index[] showing all the
1855 : * already-passed-over tuplesort output TIDs of the current page. We
1856 : * clear that array here, when advancing onto a new heap page.
1857 : */
1858 249720 : if (hscan->rs_cblock != root_blkno)
1859 : {
1860 4722 : Page page = BufferGetPage(hscan->rs_cbuf);
1861 :
1862 4722 : LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
1863 4722 : heap_get_root_tuples(page, root_offsets);
1864 4722 : LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
1865 :
1866 4722 : memset(in_index, 0, sizeof(in_index));
1867 :
1868 4722 : root_blkno = hscan->rs_cblock;
1869 : }
1870 :
1871 : /* Convert actual tuple TID to root TID */
1872 249720 : rootTuple = *heapcursor;
1873 249720 : root_offnum = ItemPointerGetOffsetNumber(heapcursor);
1874 :
1875 249720 : if (HeapTupleIsHeapOnly(heapTuple))
1876 : {
1877 18 : root_offnum = root_offsets[root_offnum - 1];
1878 18 : if (!OffsetNumberIsValid(root_offnum))
1879 0 : ereport(ERROR,
1880 : (errcode(ERRCODE_DATA_CORRUPTED),
1881 : errmsg_internal("failed to find parent tuple for heap-only tuple at (%u,%u) in table \"%s\"",
1882 : ItemPointerGetBlockNumber(heapcursor),
1883 : ItemPointerGetOffsetNumber(heapcursor),
1884 : RelationGetRelationName(heapRelation))));
1885 18 : ItemPointerSetOffsetNumber(&rootTuple, root_offnum);
1886 : }
1887 :
1888 : /*
1889 : * "merge" by skipping through the index tuples until we find or pass
1890 : * the current root tuple.
1891 : */
1892 558084 : while (!tuplesort_empty &&
1893 557582 : (!indexcursor ||
1894 557582 : ItemPointerCompare(indexcursor, &rootTuple) < 0))
1895 : {
1896 : Datum ts_val;
1897 : bool ts_isnull;
1898 :
1899 308364 : if (indexcursor)
1900 : {
1901 : /*
1902 : * Remember index items seen earlier on the current heap page
1903 : */
1904 307920 : if (ItemPointerGetBlockNumber(indexcursor) == root_blkno)
1905 302134 : in_index[ItemPointerGetOffsetNumber(indexcursor) - 1] = true;
1906 : }
1907 :
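     : /* Advance to the next TID in the sorted index contents. */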
1908 308364 : tuplesort_empty = !tuplesort_getdatum(state->tuplesort, true,
1909 : false, &ts_val, &ts_isnull,
1910 308364 : NULL);
1911 : Assert(tuplesort_empty || !ts_isnull);
1912 308364 : if (!tuplesort_empty)
1913 : {
1914 308332 : itemptr_decode(&decoded, DatumGetInt64(ts_val));
1915 308332 : indexcursor = &decoded;
1916 : }
1917 : else
1918 : {
1919 : /* Be tidy */
1920 32 : indexcursor = NULL;
1921 : }
1922 : }
1923 :
1924 : /*
1925 : * If the tuplesort has overshot *and* we didn't see a match earlier,
1926 : * then this tuple is missing from the index, so insert it.
1927 : */
1928 499382 : if ((tuplesort_empty ||
1929 249662 : ItemPointerCompare(indexcursor, &rootTuple) > 0) &&
1930 148 : !in_index[root_offnum - 1])
1931 : {
1932 136 : MemoryContextReset(econtext->ecxt_per_tuple_memory);
1933 :
1934 : /* Set up for predicate or expression evaluation */
1935 136 : ExecStoreHeapTuple(heapTuple, slot, false);
1936 :
1937 : /*
1938 : * In a partial index, discard tuples that don't satisfy the
1939 : * predicate.
1940 : */
1941 136 : if (predicate != NULL)
1942 : {
1943 48 : if (!ExecQual(predicate, econtext))
1944 48 : continue;
1945 : }
1946 :
1947 : /*
1948 : * For the current heap tuple, extract all the attributes we use
1949 : * in this index, and note which are null. This also performs
1950 : * evaluation of any expressions needed.
1951 : */
1952 88 : FormIndexDatum(indexInfo,
1953 : slot,
1954 : estate,
1955 : values,
1956 : isnull);
1957 :
1958 : /*
1959 : * You'd think we should go ahead and build the index tuple here,
1960 : * but some index AMs want to do further processing on the data
1961 : * first. So pass the values[] and isnull[] arrays, instead.
1962 : */
1963 :
1964 : /*
1965 : * If the tuple is already committed dead, you might think we
1966 : * could suppress uniqueness checking, but this is no longer true
1967 : * in the presence of HOT, because the insert is actually a proxy
1968 : * for a uniqueness check on the whole HOT-chain. That is, the
1969 : * tuple we have here could be dead because it was already
1970 : * HOT-updated, and if so the updating transaction will not have
1971 : * thought it should insert index entries. The index AM will
1972 : * check the whole HOT-chain and correctly detect a conflict if
1973 : * there is one.
1974 : */
1975 :
1976 88 : index_insert(indexRelation,
1977 : values,
1978 : isnull,
1979 : &rootTuple,
1980 : heapRelation,
1981 88 : indexInfo->ii_Unique ?
1982 : UNIQUE_CHECK_YES : UNIQUE_CHECK_NO,
1983 : false,
1984 : indexInfo);
1985 :
1986 88 : state->tups_inserted += 1;
1987 : }
1988 : }
1989 :
1990 738 : table_endscan(scan);
1991 :
1992 738 : ExecDropSingleTupleTableSlot(slot);
1993 :
1994 738 : FreeExecutorState(estate);
1995 :
1996 : /* These may have been pointing to the now-gone estate */
1997 738 : indexInfo->ii_ExpressionsState = NIL;
1998 738 : indexInfo->ii_PredicateState = NULL;
1999 738 : }
2000 :
2001 : /*
2002 : * Return the number of blocks that have been read by this scan since
2003 : * starting. This is meant for progress reporting rather than being fully
2004 : * accurate: in a parallel scan, workers can be concurrently reading blocks
2005 : * further ahead than what we report.
2006 : */
2007 : static BlockNumber
2008 15022672 : heapam_scan_get_blocks_done(HeapScanDesc hscan)
2009 : {
2010 15022672 : ParallelBlockTableScanDesc bpscan = NULL;
2011 : BlockNumber startblock;
2012 : BlockNumber blocks_done;
2013 :
2014 15022672 : if (hscan->rs_base.rs_parallel != NULL)
2015 : {
2016 2173778 : bpscan = (ParallelBlockTableScanDesc) hscan->rs_base.rs_parallel;
2017 2173778 : startblock = bpscan->phs_startblock;
2018 : }
2019 : else
2020 12848894 : startblock = hscan->rs_startblock;
2021 :
2022 : /*
2023 : * Might have wrapped around the end of the relation, if startblock was
2024 : * not zero.
2025 : */
2026 15022672 : if (hscan->rs_cblock > startblock)
2027 14447922 : blocks_done = hscan->rs_cblock - startblock;
2028 : else
2029 : {
2030 : BlockNumber nblocks;
2031 :
2032 574750 : nblocks = bpscan != NULL ? bpscan->phs_nblocks : hscan->rs_nblocks;
2033 574750 : blocks_done = nblocks - startblock +
2034 574750 : hscan->rs_cblock;
2035 : }
2036 :
2037 15022672 : return blocks_done;
2038 : }
2039 :
2040 :
2041 : /* ------------------------------------------------------------------------
2042 : * Miscellaneous callbacks for the heap AM
2043 : * ------------------------------------------------------------------------
2044 : */
2045 :
2046 : /*
2047 : * Check to see whether the table needs a TOAST table. It does only if
2048 : * (1) there are any toastable attributes, and (2) the maximum length
2049 : * of a tuple could exceed TOAST_TUPLE_THRESHOLD. (We don't want to
2050 : * create a toast table for something like "f1 varchar(20)".)
2051 : */
2052 : static bool
2053 46320 : heapam_relation_needs_toast_table(Relation rel)
2054 : {
2055 46320 : int32 data_length = 0;
2056 46320 : bool maxlength_unknown = false;
2057 46320 : bool has_toastable_attrs = false;
2058 46320 : TupleDesc tupdesc = rel->rd_att;
2059 : int32 tuple_length;
2060 : int i;
2061 :
2062 185036 : for (i = 0; i < tupdesc->natts; i++)
2063 : {
2064 138716 : Form_pg_attribute att = TupleDescAttr(tupdesc, i);
2065 :
2066 138716 : if (att->attisdropped)
2067 1122 : continue;
2068 137594 : if (att->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
2069 866 : continue;
2070 136728 : data_length = att_align_nominal(data_length, att->attalign);
2071 136728 : if (att->attlen > 0)
2072 : {
2073 : /* Fixed-length types are never toastable */
2074 102044 : data_length += att->attlen;
2075 : }
2076 : else
2077 : {
2078 34684 : int32 maxlen = type_maximum_size(att->atttypid,
2079 : att->atttypmod);
2080 :
2081 34684 : if (maxlen < 0)
2082 31514 : maxlength_unknown = true;
2083 : else
2084 3170 : data_length += maxlen;
2085 34684 : if (att->attstorage != TYPSTORAGE_PLAIN)
2086 33446 : has_toastable_attrs = true;
2087 : }
2088 : }
2089 46320 : if (!has_toastable_attrs)
2090 26556 : return false; /* nothing to toast? */
2091 19764 : if (maxlength_unknown)
2092 17414 : return true; /* any unlimited-length attrs? */
2093 2350 : tuple_length = MAXALIGN(SizeofHeapTupleHeader +
2094 2350 : BITMAPLEN(tupdesc->natts)) +
2095 2350 : MAXALIGN(data_length);
2096 2350 : return (tuple_length > TOAST_TUPLE_THRESHOLD);
2097 : }
2098 :
2099 : /*
2100 : * TOAST tables for heap relations are just heap relations.
2101 : */
2102 : static Oid
2103 17974 : heapam_relation_toast_am(Relation rel)
2104 : {
2105 17974 : return rel->rd_rel->relam;
2106 : }
2107 :
2108 :
2109 : /* ------------------------------------------------------------------------
2110 : * Planner related callbacks for the heap AM
2111 : * ------------------------------------------------------------------------
2112 : */
2113 :
2114 : #define HEAP_OVERHEAD_BYTES_PER_TUPLE \
2115 : (MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData))
2116 : #define HEAP_USABLE_BYTES_PER_PAGE \
2117 : (BLCKSZ - SizeOfPageHeaderData)
2118 :
2119 : static void
2120 464296 : heapam_estimate_rel_size(Relation rel, int32 *attr_widths,
2121 : BlockNumber *pages, double *tuples,
2122 : double *allvisfrac)
2123 : {
2124 464296 : table_block_relation_estimate_size(rel, attr_widths, pages,
2125 : tuples, allvisfrac,
2126 : HEAP_OVERHEAD_BYTES_PER_TUPLE,
2127 : HEAP_USABLE_BYTES_PER_PAGE);
2128 464296 : }
2129 :
2130 :
2131 : /* ------------------------------------------------------------------------
2132 : * Executor related callbacks for the heap AM
2133 : * ------------------------------------------------------------------------
2134 : */
2135 :
2136 : static bool
2137 6834706 : heapam_scan_bitmap_next_tuple(TableScanDesc scan,
2138 : TupleTableSlot *slot,
2139 : bool *recheck,
2140 : uint64 *lossy_pages,
2141 : uint64 *exact_pages)
2142 : {
2143 6834706 : BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) scan;
2144 6834706 : HeapScanDesc hscan = (HeapScanDesc) bscan;
2145 : OffsetNumber targoffset;
2146 : Page page;
2147 : ItemId lp;
2148 :
2149 : /*
2150 : * Out of range? If so, nothing more to look at on this page
2151 : */
2152 7240872 : while (hscan->rs_cindex >= hscan->rs_ntuples)
2153 : {
2154 : /*
2155 : * Returns false if the bitmap is exhausted and there are no further
2156 : * blocks we need to scan.
2157 : */
2158 432110 : if (!BitmapHeapScanNextBlock(scan, recheck, lossy_pages, exact_pages))
2159 25938 : return false;
2160 : }
2161 :
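     : /*
     :  * Fetch the next visible tuple on this page, using the offset recorded
     :  * in rs_vistuples[] by BitmapHeapScanNextBlock().
     :  */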
2162 6808762 : targoffset = hscan->rs_vistuples[hscan->rs_cindex];
2163 6808762 : page = BufferGetPage(hscan->rs_cbuf);
2164 6808762 : lp = PageGetItemId(page, targoffset);
2165 : Assert(ItemIdIsNormal(lp));
2166 :
2167 6808762 : hscan->rs_ctup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2168 6808762 : hscan->rs_ctup.t_len = ItemIdGetLength(lp);
2169 6808762 : hscan->rs_ctup.t_tableOid = scan->rs_rd->rd_id;
2170 6808762 : ItemPointerSet(&hscan->rs_ctup.t_self, hscan->rs_cblock, targoffset);
2171 :
2172 6808762 : pgstat_count_heap_fetch(scan->rs_rd);
2173 :
2174 : /*
2175 : * Set up the result slot to point to this tuple. Note that the slot
2176 : * acquires a pin on the buffer.
2177 : */
2178 6808762 : ExecStoreBufferHeapTuple(&hscan->rs_ctup,
2179 : slot,
2180 : hscan->rs_cbuf);
2181 :
2182 6808762 : hscan->rs_cindex++;
2183 :
2184 6808762 : return true;
2185 : }
2186 :
2187 : static bool
2188 12912 : heapam_scan_sample_next_block(TableScanDesc scan, SampleScanState *scanstate)
2189 : {
2190 12912 : HeapScanDesc hscan = (HeapScanDesc) scan;
2191 12912 : TsmRoutine *tsm = scanstate->tsmroutine;
2192 : BlockNumber blockno;
2193 :
2194 : /* return false immediately if relation is empty */
2195 12912 : if (hscan->rs_nblocks == 0)
2196 0 : return false;
2197 :
2198 : /* release previous scan buffer, if any */
2199 12912 : if (BufferIsValid(hscan->rs_cbuf))
2200 : {
2201 12736 : ReleaseBuffer(hscan->rs_cbuf);
2202 12736 : hscan->rs_cbuf = InvalidBuffer;
2203 : }
2204 :
2205 12912 : if (tsm->NextSampleBlock)
2206 4446 : blockno = tsm->NextSampleBlock(scanstate, hscan->rs_nblocks);
2207 : else
2208 : {
2209 : /* scanning table sequentially */
2210 :
2211 8466 : if (hscan->rs_cblock == InvalidBlockNumber)
2212 : {
2213 : Assert(!hscan->rs_inited);
2214 78 : blockno = hscan->rs_startblock;
2215 : }
2216 : else
2217 : {
2218 : Assert(hscan->rs_inited);
2219 :
2220 8388 : blockno = hscan->rs_cblock + 1;
2221 :
2222 8388 : if (blockno >= hscan->rs_nblocks)
2223 : {
2224 : /* wrap to beginning of rel, might not have started at 0 */
2225 78 : blockno = 0;
2226 : }
2227 :
2228 : /*
2229 : * Report our new scan position for synchronization purposes.
2230 : *
2231 : * Note: we do this before checking for end of scan so that the
2232 : * final state of the position hint is back at the start of the
2233 : * rel. That's not strictly necessary, but otherwise when you run
2234 : * the same query multiple times the starting position would shift
2235 : * a little bit backwards on every invocation, which is confusing.
2236 : * We don't guarantee any specific ordering in general, though.
2237 : */
2238 8388 : if (scan->rs_flags & SO_ALLOW_SYNC)
2239 0 : ss_report_location(scan->rs_rd, blockno);
2240 :
2241 8388 : if (blockno == hscan->rs_startblock)
2242 : {
2243 78 : blockno = InvalidBlockNumber;
2244 : }
2245 : }
2246 : }
2247 :
2248 12912 : hscan->rs_cblock = blockno;
2249 :
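     : /* An invalid block number means the sample scan is complete. */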
2250 12912 : if (!BlockNumberIsValid(blockno))
2251 : {
2252 170 : hscan->rs_inited = false;
2253 170 : return false;
2254 : }
2255 :
2256 : Assert(hscan->rs_cblock < hscan->rs_nblocks);
2257 :
2258 : /*
2259 : * Be sure to check for interrupts at least once per page. Checks at
2260 : * higher code levels won't be able to stop a sample scan that encounters
2261 : * many pages' worth of consecutive dead tuples.
2262 : */
2263 12742 : CHECK_FOR_INTERRUPTS();
2264 :
2265 : /* Read page using selected strategy */
2266 12742 : hscan->rs_cbuf = ReadBufferExtended(hscan->rs_base.rs_rd, MAIN_FORKNUM,
2267 : blockno, RBM_NORMAL, hscan->rs_strategy);
2268 :
2269 : /* in pagemode, prune the page and determine visible tuple offsets */
2270 12742 : if (hscan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
2271 8554 : heap_prepare_pagescan(scan);
2272 :
2273 12742 : hscan->rs_inited = true;
2274 12742 : return true;
2275 : }
2276 :
2277 : static bool
2278 253896 : heapam_scan_sample_next_tuple(TableScanDesc scan, SampleScanState *scanstate,
2279 : TupleTableSlot *slot)
2280 : {
2281 253896 : HeapScanDesc hscan = (HeapScanDesc) scan;
2282 253896 : TsmRoutine *tsm = scanstate->tsmroutine;
2283 253896 : BlockNumber blockno = hscan->rs_cblock;
2284 253896 : bool pagemode = (scan->rs_flags & SO_ALLOW_PAGEMODE) != 0;
2285 :
2286 : Page page;
2287 : bool all_visible;
2288 : OffsetNumber maxoffset;
2289 :
2290 : /*
2291 : * When not using pagemode, we must lock the buffer during tuple
2292 : * visibility checks.
2293 : */
2294 253896 : if (!pagemode)
2295 4194 : LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
2296 :
2297 253896 : page = BufferGetPage(hscan->rs_cbuf);
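     : /*
     :  * An all-visible page lets us skip per-tuple visibility checks; but not
     :  * if the snapshot was taken during recovery, since tuples visible to
     :  * everyone on the primary may still be invisible to a standby snapshot.
     :  */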
2298 506688 : all_visible = PageIsAllVisible(page) &&
2299 252792 : !scan->rs_snapshot->takenDuringRecovery;
2300 253896 : maxoffset = PageGetMaxOffsetNumber(page);
2301 :
2302 : for (;;)
2303 0 : {
2304 : OffsetNumber tupoffset;
2305 :
2306 253896 : CHECK_FOR_INTERRUPTS();
2307 :
2308 : /* Ask the tablesample method which tuples to check on this page. */
2309 253896 : tupoffset = tsm->NextSampleTuple(scanstate,
2310 : blockno,
2311 : maxoffset);
2312 :
2313 253896 : if (OffsetNumberIsValid(tupoffset))
2314 : {
2315 : ItemId itemid;
2316 : bool visible;
2317 241160 : HeapTuple tuple = &(hscan->rs_ctup);
2318 :
2319 : /* Skip invalid tuple pointers. */
2320 241160 : itemid = PageGetItemId(page, tupoffset);
2321 241160 : if (!ItemIdIsNormal(itemid))
2322 0 : continue;
2323 :
2324 241160 : tuple->t_data = (HeapTupleHeader) PageGetItem(page, itemid);
2325 241160 : tuple->t_len = ItemIdGetLength(itemid);
2326 241160 : ItemPointerSet(&(tuple->t_self), blockno, tupoffset);
2327 :
2328 :
2329 241160 : if (all_visible)
2330 240348 : visible = true;
2331 : else
2332 812 : visible = SampleHeapTupleVisible(scan, hscan->rs_cbuf,
2333 : tuple, tupoffset);
2334 :
2335 : /* in pagemode, heap_prepare_pagescan did this for us */
2336 241160 : if (!pagemode)
2337 6 : HeapCheckForSerializableConflictOut(visible, scan->rs_rd, tuple,
2338 : hscan->rs_cbuf, scan->rs_snapshot);
2339 :
2340 : /* Try next tuple from same page. */
2341 241160 : if (!visible)
2342 0 : continue;
2343 :
2344 : /* Found visible tuple, return it. */
2345 241160 : if (!pagemode)
2346 6 : LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
2347 :
2348 241160 : ExecStoreBufferHeapTuple(tuple, slot, hscan->rs_cbuf);
2349 :
2350 : /* Count successfully-fetched tuples as heap fetches */
2351 241160 : pgstat_count_heap_getnext(scan->rs_rd);
2352 :
2353 241160 : return true;
2354 : }
2355 : else
2356 : {
2357 : /*
2358 : * If we get here, it means we've exhausted the items on this page
2359 : * and it's time to move to the next.
2360 : */
2361 12736 : if (!pagemode)
2362 4188 : LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
2363 :
2364 12736 : ExecClearTuple(slot);
2365 12736 : return false;
2366 : }
2367 : }
2368 :
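     : /* The loop above always returns; this point is not reached. */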
2369 : Assert(0);
2370 : }
2371 :
2372 :
2373 : /* ----------------------------------------------------------------------------
2374 : * Helper functions for the above.
2375 : * ----------------------------------------------------------------------------
2376 : */
2377 :
2378 : /*
2379 : * Reconstruct and rewrite the given tuple
2380 : *
2381 : * We cannot simply copy the tuple as-is, for several reasons:
2382 : *
2383 : * 1. We'd like to squeeze out the values of any dropped columns, both
2384 : * to save space and to ensure we have no corner-case failures. (It's
2385 : * possible for example that the new table hasn't got a TOAST table
2386 : * and so is unable to store any large values of dropped cols.)
2387 : *
2388 : * 2. The tuple might not even be legal for the new table; this is
2389 : * currently only known to happen as an after-effect of ALTER TABLE
2390 : * SET WITHOUT OIDS.
2391 : *
2392 : * So, we must reconstruct the tuple from component Datums.
2393 : */
2394 : static void
2395 728324 : reform_and_rewrite_tuple(HeapTuple tuple,
2396 : Relation OldHeap, Relation NewHeap,
2397 : Datum *values, bool *isnull, RewriteState rwstate)
2398 : {
2399 728324 : TupleDesc oldTupDesc = RelationGetDescr(OldHeap);
2400 728324 : TupleDesc newTupDesc = RelationGetDescr(NewHeap);
2401 : HeapTuple copiedTuple;
2402 : int i;
2403 :
2404 728324 : heap_deform_tuple(tuple, oldTupDesc, values, isnull);
2405 :
2406 : /* Be sure to null out any dropped columns */
2407 6372306 : for (i = 0; i < newTupDesc->natts; i++)
2408 : {
2409 5643982 : if (TupleDescCompactAttr(newTupDesc, i)->attisdropped)
2410 0 : isnull[i] = true;
2411 : }
2412 :
2413 728324 : copiedTuple = heap_form_tuple(newTupDesc, values, isnull);
2414 :
2415 : /* The heap rewrite module does the rest */
2416 728324 : rewrite_heap_tuple(rwstate, tuple, copiedTuple);
2417 :
2418 728324 : heap_freetuple(copiedTuple);
2419 728324 : }
2420 :
2421 : /*
2422 : * Check visibility of the tuple.
2423 : */
2424 : static bool
2425 812 : SampleHeapTupleVisible(TableScanDesc scan, Buffer buffer,
2426 : HeapTuple tuple,
2427 : OffsetNumber tupoffset)
2428 : {
2429 812 : HeapScanDesc hscan = (HeapScanDesc) scan;
2430 :
2431 812 : if (scan->rs_flags & SO_ALLOW_PAGEMODE)
2432 : {
2433 806 : uint32 start = 0,
2434 806 : end = hscan->rs_ntuples;
2435 :
2436 : /*
2437 : * In page-at-a-time mode, heap_prepare_pagescan() already did visibility
2438 : * checks, so just look at the info it left in rs_vistuples[].
2439 : *
2440 : * We use a binary search over the known-sorted array. Note: we could
2441 : * save some effort if we insisted that NextSampleTuple select tuples
2442 : * in increasing order, but it's not clear that there would be enough
2443 : * gain to justify the restriction.
2444 : */
2445 1556 : while (start < end)
2446 : {
2447 1556 : uint32 mid = start + (end - start) / 2;
2448 1556 : OffsetNumber curoffset = hscan->rs_vistuples[mid];
2449 :
2450 1556 : if (tupoffset == curoffset)
2451 806 : return true;
2452 750 : else if (tupoffset < curoffset)
2453 442 : end = mid;
2454 : else
2455 308 : start = mid + 1;
2456 : }
2457 :
2458 0 : return false;
2459 : }
2460 : else
2461 : {
2462 : /* Otherwise, we have to check the tuple individually. */
2463 6 : return HeapTupleSatisfiesVisibility(tuple, scan->rs_snapshot,
2464 : buffer);
2465 : }
2466 : }
2467 :
2468 : /*
2469 : * Helper function to get the next block of a bitmap heap scan. Returns true
2470 : * when it got the next block and saved it in the scan descriptor, and false
2471 : * when the bitmap or the relation is exhausted.
2472 : */
2473 : static bool
2474 432110 : BitmapHeapScanNextBlock(TableScanDesc scan,
2475 : bool *recheck,
2476 : uint64 *lossy_pages, uint64 *exact_pages)
2477 : {
2478 432110 : BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) scan;
2479 432110 : HeapScanDesc hscan = (HeapScanDesc) bscan;
2480 : BlockNumber block;
2481 : void *per_buffer_data;
2482 : Buffer buffer;
2483 : Snapshot snapshot;
2484 : int ntup;
2485 : TBMIterateResult *tbmres;
2486 : OffsetNumber offsets[TBM_MAX_TUPLES_PER_PAGE];
2487 432110 : int noffsets = -1;
2488 :
2489 : Assert(scan->rs_flags & SO_TYPE_BITMAPSCAN);
2490 : Assert(hscan->rs_read_stream);
2491 :
2492 432110 : hscan->rs_cindex = 0;
2493 432110 : hscan->rs_ntuples = 0;
2494 :
2495 : /* Release buffer containing previous block. */
2496 432110 : if (BufferIsValid(hscan->rs_cbuf))
2497 : {
2498 405780 : ReleaseBuffer(hscan->rs_cbuf);
2499 405780 : hscan->rs_cbuf = InvalidBuffer;
2500 : }
2501 :
2502 432110 : hscan->rs_cbuf = read_stream_next_buffer(hscan->rs_read_stream,
2503 : &per_buffer_data);
2504 :
2505 432110 : if (BufferIsInvalid(hscan->rs_cbuf))
2506 : {
2507 : /* the bitmap is exhausted */
2508 25938 : return false;
2509 : }
2510 :
2511 : Assert(per_buffer_data);
2512 :
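     : /* The read stream's per-buffer data holds the TBMIterateResult for this block. */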
2513 406172 : tbmres = per_buffer_data;
2514 :
2515 : Assert(BlockNumberIsValid(tbmres->blockno));
2516 : Assert(BufferGetBlockNumber(hscan->rs_cbuf) == tbmres->blockno);
2517 :
2518 : /* Exact pages need their tuple offsets extracted. */
2519 406172 : if (!tbmres->lossy)
2520 245782 : noffsets = tbm_extract_page_tuple(tbmres, offsets,
2521 : TBM_MAX_TUPLES_PER_PAGE);
2522 :
2523 406172 : *recheck = tbmres->recheck;
2524 :
2525 406172 : block = hscan->rs_cblock = tbmres->blockno;
2526 406172 : buffer = hscan->rs_cbuf;
2527 406172 : snapshot = scan->rs_snapshot;
2528 :
2529 406172 : ntup = 0;
2530 :
2531 : /*
2532 : * Prune and repair fragmentation for the whole page, if possible.
2533 : */
2534 406172 : heap_page_prune_opt(scan->rs_rd, buffer);
2535 :
2536 : /*
2537 : * We must hold share lock on the buffer content while examining tuple
2538 : * visibility. Afterwards, however, the tuples we have found to be
2539 : * visible are guaranteed good as long as we hold the buffer pin.
2540 : */
2541 406172 : LockBuffer(buffer, BUFFER_LOCK_SHARE);
2542 :
2543 : /*
2544 : * We need two separate strategies for lossy and non-lossy cases.
2545 : */
2546 406172 : if (!tbmres->lossy)
2547 : {
2548 : /*
2549 : * Bitmap is non-lossy, so we just look through the offsets listed in
2550 : * tbmres; but we have to follow any HOT chain starting at each such
2551 : * offset.
2552 : */
2553 : int curslot;
2554 :
2555 : /* We must have extracted the tuple offsets by now */
2556 : Assert(noffsets > -1);
2557 :
2558 6066124 : for (curslot = 0; curslot < noffsets; curslot++)
2559 : {
2560 5820348 : OffsetNumber offnum = offsets[curslot];
2561 : ItemPointerData tid;
2562 : HeapTupleData heapTuple;
2563 :
2564 5820348 : ItemPointerSet(&tid, block, offnum);
2565 5820348 : if (heap_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot,
2566 : &heapTuple, NULL, true))
2567 5588006 : hscan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid);
2568 : }
2569 : }
2570 : else
2571 : {
2572 : /*
2573 : * Bitmap is lossy, so we must examine each line pointer on the page.
2574 : * But we can ignore HOT chains, since we'll check each tuple anyway.
2575 : */
2576 160390 : Page page = BufferGetPage(buffer);
2577 160390 : OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
2578 : OffsetNumber offnum;
2579 :
2580 1384660 : for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
2581 : {
2582 : ItemId lp;
2583 : HeapTupleData loctup;
2584 : bool valid;
2585 :
2586 1224270 : lp = PageGetItemId(page, offnum);
2587 1224270 : if (!ItemIdIsNormal(lp))
2588 0 : continue;
2589 1224270 : loctup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2590 1224270 : loctup.t_len = ItemIdGetLength(lp);
2591 1224270 : loctup.t_tableOid = scan->rs_rd->rd_id;
2592 1224270 : ItemPointerSet(&loctup.t_self, block, offnum);
2593 1224270 : valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
2594 1224270 : if (valid)
2595 : {
2596 1224144 : hscan->rs_vistuples[ntup++] = offnum;
2597 1224144 : PredicateLockTID(scan->rs_rd, &loctup.t_self, snapshot,
2598 1224144 : HeapTupleHeaderGetXmin(loctup.t_data));
2599 : }
2600 1224270 : HeapCheckForSerializableConflictOut(valid, scan->rs_rd, &loctup,
2601 : buffer, snapshot);
2602 : }
2603 : }
2604 :
2605 406166 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2606 :
2607 : Assert(ntup <= MaxHeapTuplesPerPage);
2608 406166 : hscan->rs_ntuples = ntup;
2609 :
2610 406166 : if (tbmres->lossy)
2611 160390 : (*lossy_pages)++;
2612 : else
2613 245776 : (*exact_pages)++;
2614 :
2615 : /*
2616 : * Return true to indicate that a valid block was found and the bitmap is
2617 : * not exhausted. If there are no visible tuples on this page,
2618 : * hscan->rs_ntuples will be 0 and heapam_scan_bitmap_next_tuple() will
2619 : * loop back into this function to advance to the next block in the
2620 : * bitmap.
2621 : */
2622 406166 : return true;
2623 : }
2624 :
2625 : /* ------------------------------------------------------------------------
2626 : * Definition of the heap table access method.
2627 : * ------------------------------------------------------------------------
2628 : */
2629 :
2630 : static const TableAmRoutine heapam_methods = {
2631 : .type = T_TableAmRoutine,
2632 :
2633 : .slot_callbacks = heapam_slot_callbacks,
2634 :
2635 : .scan_begin = heap_beginscan,
2636 : .scan_end = heap_endscan,
2637 : .scan_rescan = heap_rescan,
2638 : .scan_getnextslot = heap_getnextslot,
2639 :
2640 : .scan_set_tidrange = heap_set_tidrange,
2641 : .scan_getnextslot_tidrange = heap_getnextslot_tidrange,
2642 :
2643 : .parallelscan_estimate = table_block_parallelscan_estimate,
2644 : .parallelscan_initialize = table_block_parallelscan_initialize,
2645 : .parallelscan_reinitialize = table_block_parallelscan_reinitialize,
2646 :
2647 : .index_fetch_begin = heapam_index_fetch_begin,
2648 : .index_fetch_reset = heapam_index_fetch_reset,
2649 : .index_fetch_end = heapam_index_fetch_end,
2650 : .index_fetch_tuple = heapam_index_fetch_tuple,
2651 :
2652 : .tuple_insert = heapam_tuple_insert,
2653 : .tuple_insert_speculative = heapam_tuple_insert_speculative,
2654 : .tuple_complete_speculative = heapam_tuple_complete_speculative,
2655 : .multi_insert = heap_multi_insert,
2656 : .tuple_delete = heapam_tuple_delete,
2657 : .tuple_update = heapam_tuple_update,
2658 : .tuple_lock = heapam_tuple_lock,
2659 :
2660 : .tuple_fetch_row_version = heapam_fetch_row_version,
2661 : .tuple_get_latest_tid = heap_get_latest_tid,
2662 : .tuple_tid_valid = heapam_tuple_tid_valid,
2663 : .tuple_satisfies_snapshot = heapam_tuple_satisfies_snapshot,
2664 : .index_delete_tuples = heap_index_delete_tuples,
2665 :
2666 : .relation_set_new_filelocator = heapam_relation_set_new_filelocator,
2667 : .relation_nontransactional_truncate = heapam_relation_nontransactional_truncate,
2668 : .relation_copy_data = heapam_relation_copy_data,
2669 : .relation_copy_for_cluster = heapam_relation_copy_for_cluster,
2670 : .relation_vacuum = heap_vacuum_rel,
2671 : .scan_analyze_next_block = heapam_scan_analyze_next_block,
2672 : .scan_analyze_next_tuple = heapam_scan_analyze_next_tuple,
2673 : .index_build_range_scan = heapam_index_build_range_scan,
2674 : .index_validate_scan = heapam_index_validate_scan,
2675 :
2676 : .relation_size = table_block_relation_size,
2677 : .relation_needs_toast_table = heapam_relation_needs_toast_table,
2678 : .relation_toast_am = heapam_relation_toast_am,
2679 : .relation_fetch_toast_slice = heap_fetch_toast_slice,
2680 :
2681 : .relation_estimate_size = heapam_estimate_rel_size,
2682 :
2683 : .scan_bitmap_next_tuple = heapam_scan_bitmap_next_tuple,
2684 : .scan_sample_next_block = heapam_scan_sample_next_block,
2685 : .scan_sample_next_tuple = heapam_scan_sample_next_tuple
2686 : };
2687 :
2688 :
2689 : const TableAmRoutine *
2690 19908246 : GetHeapamTableAmRoutine(void)
2691 : {
2692 19908246 : return &heapam_methods;
2693 : }
2694 :
2695 : Datum
2696 2509822 : heap_tableam_handler(PG_FUNCTION_ARGS)
2697 : {
2698 2509822 : PG_RETURN_POINTER(&heapam_methods);
2699 : }
2700 : }