Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * heapam.c
4 : * heap access method code
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/access/heap/heapam.c
12 : *
13 : *
14 : * INTERFACE ROUTINES
15 : * heap_beginscan - begin relation scan
16 : * heap_rescan - restart a relation scan
17 : * heap_endscan - end relation scan
18 : * heap_getnext - retrieve next tuple in scan
19 : * heap_fetch - retrieve tuple with given tid
20 : * heap_insert - insert tuple into a relation
21 : * heap_multi_insert - insert multiple tuples into a relation
22 : * heap_delete - delete a tuple from a relation
23 : * heap_update - replace a tuple in a relation with another tuple
24 : *
25 : * NOTES
26 : * This file contains the heap_ routines which implement
27 : * the POSTGRES heap access method used for all POSTGRES
28 : * relations.
29 : *
30 : *-------------------------------------------------------------------------
31 : */
32 : #include "postgres.h"
33 :
34 : #include "access/heapam.h"
35 : #include "access/heaptoast.h"
36 : #include "access/hio.h"
37 : #include "access/multixact.h"
38 : #include "access/subtrans.h"
39 : #include "access/syncscan.h"
40 : #include "access/valid.h"
41 : #include "access/visibilitymap.h"
42 : #include "access/xloginsert.h"
43 : #include "catalog/pg_database.h"
44 : #include "catalog/pg_database_d.h"
45 : #include "commands/vacuum.h"
46 : #include "pgstat.h"
47 : #include "port/pg_bitutils.h"
48 : #include "storage/lmgr.h"
49 : #include "storage/predicate.h"
50 : #include "storage/procarray.h"
51 : #include "utils/datum.h"
52 : #include "utils/injection_point.h"
53 : #include "utils/inval.h"
54 : #include "utils/spccache.h"
55 : #include "utils/syscache.h"
56 :
57 :
58 : static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
59 : TransactionId xid, CommandId cid, int options);
60 : static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
61 : Buffer newbuf, HeapTuple oldtup,
62 : HeapTuple newtup, HeapTuple old_key_tuple,
63 : bool all_visible_cleared, bool new_all_visible_cleared);
64 : #ifdef USE_ASSERT_CHECKING
65 : static void check_lock_if_inplace_updateable_rel(Relation relation,
66 : ItemPointer otid,
67 : HeapTuple newtup);
68 : static void check_inplace_rel_lock(HeapTuple oldtup);
69 : #endif
70 : static Bitmapset *HeapDetermineColumnsInfo(Relation relation,
71 : Bitmapset *interesting_cols,
72 : Bitmapset *external_cols,
73 : HeapTuple oldtup, HeapTuple newtup,
74 : bool *has_external);
75 : static bool heap_acquire_tuplock(Relation relation, ItemPointer tid,
76 : LockTupleMode mode, LockWaitPolicy wait_policy,
77 : bool *have_tuple_lock);
78 : static inline BlockNumber heapgettup_advance_block(HeapScanDesc scan,
79 : BlockNumber block,
80 : ScanDirection dir);
81 : static pg_noinline BlockNumber heapgettup_initial_block(HeapScanDesc scan,
82 : ScanDirection dir);
83 : static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
84 : uint16 old_infomask2, TransactionId add_to_xmax,
85 : LockTupleMode mode, bool is_update,
86 : TransactionId *result_xmax, uint16 *result_infomask,
87 : uint16 *result_infomask2);
88 : static TM_Result heap_lock_updated_tuple(Relation rel, HeapTuple tuple,
89 : ItemPointer ctid, TransactionId xid,
90 : LockTupleMode mode);
91 : static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
92 : uint16 *new_infomask2);
93 : static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
94 : uint16 t_infomask);
95 : static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
96 : LockTupleMode lockmode, bool *current_is_member);
97 : static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
98 : Relation rel, ItemPointer ctid, XLTW_Oper oper,
99 : int *remaining);
100 : static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
101 : uint16 infomask, Relation rel, int *remaining,
102 : bool logLockFailure);
103 : static void index_delete_sort(TM_IndexDeleteOp *delstate);
104 : static int bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate);
105 : static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
106 : static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
107 : bool *copy);
108 :
109 :
110 : /*
111 : * Each tuple lock mode has a corresponding heavyweight lock, and one or two
112 : * corresponding MultiXactStatuses (one to merely lock tuples, another one to
113 : * update them). This table (and the macros below) helps us determine the
114 : * heavyweight lock mode and MultiXactStatus values to use for any particular
115 : * tuple lock strength.
116 : *
117 : * These interact with InplaceUpdateTupleLock, an alias for ExclusiveLock.
118 : *
119 : * Don't look at lockstatus/updstatus directly! Use get_mxact_status_for_lock
120 : * instead.
121 : */
122 : static const struct
123 : {
124 : LOCKMODE hwlock;
125 : int lockstatus;
126 : int updstatus;
127 : }
128 :
129 : tupleLockExtraInfo[MaxLockTupleMode + 1] =
130 : {
131 : { /* LockTupleKeyShare */
132 : AccessShareLock,
133 : MultiXactStatusForKeyShare,
134 : -1 /* KeyShare does not allow updating tuples */
135 : },
136 : { /* LockTupleShare */
137 : RowShareLock,
138 : MultiXactStatusForShare,
139 : -1 /* Share does not allow updating tuples */
140 : },
141 : { /* LockTupleNoKeyExclusive */
142 : ExclusiveLock,
143 : MultiXactStatusForNoKeyUpdate,
144 : MultiXactStatusNoKeyUpdate
145 : },
146 : { /* LockTupleExclusive */
147 : AccessExclusiveLock,
148 : MultiXactStatusForUpdate,
149 : MultiXactStatusUpdate
150 : }
151 : };
152 :
153 : /* Get the LOCKMODE for a given MultiXactStatus */
154 : #define LOCKMODE_from_mxstatus(status) \
155 : (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
156 :
157 : /*
158 : * Acquire heavyweight locks on tuples, using a LockTupleMode strength value.
159 : * This is more readable than having every caller translate it to lock.h's
160 : * LOCKMODE.
161 : */
162 : #define LockTupleTuplock(rel, tup, mode) \
163 : LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
164 : #define UnlockTupleTuplock(rel, tup, mode) \
165 : UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
166 : #define ConditionalLockTupleTuplock(rel, tup, mode, log) \
167 : ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock, (log))
168 :
169 : #ifdef USE_PREFETCH
170 : /*
171 : * heap_index_delete_tuples and index_delete_prefetch_buffer use this
172 : * structure to coordinate prefetching activity
173 : */
174 : typedef struct
175 : {
176 : BlockNumber cur_hblkno;
177 : int next_item;
178 : int ndeltids;
179 : TM_IndexDelete *deltids;
180 : } IndexDeletePrefetchState;
181 : #endif
182 :
183 : /* heap_index_delete_tuples bottom-up index deletion costing constants */
184 : #define BOTTOMUP_MAX_NBLOCKS 6
185 : #define BOTTOMUP_TOLERANCE_NBLOCKS 3
186 :
187 : /*
188 : * heap_index_delete_tuples uses this when determining which heap blocks it
189 : * must visit to help its bottom-up index deletion caller
190 : */
191 : typedef struct IndexDeleteCounts
192 : {
193 : int16 npromisingtids; /* Number of "promising" TIDs in group */
194 : int16 ntids; /* Number of TIDs in group */
195 : int16 ifirsttid; /* Offset to group's first deltid */
196 : } IndexDeleteCounts;
197 :
198 : /*
199 : * This table maps each MultiXactStatus value to the corresponding tuple
200 : * lock strength value.
201 : */
202 : static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
203 : {
204 : LockTupleKeyShare, /* ForKeyShare */
205 : LockTupleShare, /* ForShare */
206 : LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
207 : LockTupleExclusive, /* ForUpdate */
208 : LockTupleNoKeyExclusive, /* NoKeyUpdate */
209 : LockTupleExclusive /* Update */
210 : };
211 :
212 : /* Get the LockTupleMode for a given MultiXactStatus */
213 : #define TUPLOCK_from_mxstatus(status) \
214 : (MultiXactStatusLock[(status)])
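The two lookup tables above translate in opposite directions: tupleLockExtraInfo goes from a LockTupleMode to the heavyweight lock and MultiXactStatus values to use, while MultiXactStatusLock goes from a MultiXactStatus back to the tuple lock strength it represents. The sketch below is not part of heapam.c (the helper name is made up for illustration); it simply shows how LOCKMODE_from_mxstatus() chains the two tables to find the heavyweight lock matching a status found in a MultiXact.

/*
 * Illustrative sketch only: chain MultiXactStatusLock and
 * tupleLockExtraInfo to obtain the heavyweight lock mode matching a
 * MultiXactStatus, which is exactly what LOCKMODE_from_mxstatus()
 * expands to.  For example, MultiXactStatusForShare maps to
 * LockTupleShare, whose hwlock entry is RowShareLock.
 */
static LOCKMODE
example_hwlock_for_mxstatus(MultiXactStatus status)
{
	LockTupleMode mode = TUPLOCK_from_mxstatus(status);

	return tupleLockExtraInfo[mode].hwlock;
}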
215 :
216 : /*
217 : * Check that we have a valid snapshot if we might need TOAST access.
218 : */
219 : static inline void
220 20947800 : AssertHasSnapshotForToast(Relation rel)
221 : {
222 : #ifdef USE_ASSERT_CHECKING
223 :
224 : /* bootstrap mode in particular breaks this rule */
225 : if (!IsNormalProcessingMode())
226 : return;
227 :
228 : /* if the relation doesn't have a TOAST table, we are good */
229 : if (!OidIsValid(rel->rd_rel->reltoastrelid))
230 : return;
231 :
232 : Assert(HaveRegisteredOrActiveSnapshot());
233 :
234 : #endif /* USE_ASSERT_CHECKING */
235 20947800 : }
236 :
237 : /* ----------------------------------------------------------------
238 : * heap support routines
239 : * ----------------------------------------------------------------
240 : */
241 :
242 : /*
243 : * Streaming read API callback for parallel sequential scans. Returns the next
244 : * block the caller wants from the read stream or InvalidBlockNumber when done.
245 : */
246 : static BlockNumber
247 201574 : heap_scan_stream_read_next_parallel(ReadStream *stream,
248 : void *callback_private_data,
249 : void *per_buffer_data)
250 : {
251 201574 : HeapScanDesc scan = (HeapScanDesc) callback_private_data;
252 :
253 : Assert(ScanDirectionIsForward(scan->rs_dir));
254 : Assert(scan->rs_base.rs_parallel);
255 :
256 201574 : if (unlikely(!scan->rs_inited))
257 : {
258 : /* parallel scan */
259 2992 : table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
260 2992 : scan->rs_parallelworkerdata,
261 2992 : (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel);
262 :
263 : /* may return InvalidBlockNumber if there are no more blocks */
264 5984 : scan->rs_prefetch_block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
265 2992 : scan->rs_parallelworkerdata,
266 2992 : (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel);
267 2992 : scan->rs_inited = true;
268 : }
269 : else
270 : {
271 198582 : scan->rs_prefetch_block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
272 198582 : scan->rs_parallelworkerdata, (ParallelBlockTableScanDesc)
273 198582 : scan->rs_base.rs_parallel);
274 : }
275 :
276 201574 : return scan->rs_prefetch_block;
277 : }
278 :
279 : /*
280 : * Streaming read API callback for serial sequential and TID range scans.
281 : * Returns the next block the caller wants from the read stream or
282 : * InvalidBlockNumber when done.
283 : */
284 : static BlockNumber
285 7511362 : heap_scan_stream_read_next_serial(ReadStream *stream,
286 : void *callback_private_data,
287 : void *per_buffer_data)
288 : {
289 7511362 : HeapScanDesc scan = (HeapScanDesc) callback_private_data;
290 :
291 7511362 : if (unlikely(!scan->rs_inited))
292 : {
293 1949916 : scan->rs_prefetch_block = heapgettup_initial_block(scan, scan->rs_dir);
294 1949916 : scan->rs_inited = true;
295 : }
296 : else
297 5561446 : scan->rs_prefetch_block = heapgettup_advance_block(scan,
298 : scan->rs_prefetch_block,
299 : scan->rs_dir);
300 :
301 7511362 : return scan->rs_prefetch_block;
302 : }
303 :
304 : /*
305 : * Read stream API callback for bitmap heap scans.
306 : * Returns the next block the caller wants from the read stream or
307 : * InvalidBlockNumber when done.
308 : */
309 : static BlockNumber
310 420024 : bitmapheap_stream_read_next(ReadStream *pgsr, void *private_data,
311 : void *per_buffer_data)
312 : {
313 420024 : TBMIterateResult *tbmres = per_buffer_data;
314 420024 : BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) private_data;
315 420024 : HeapScanDesc hscan = (HeapScanDesc) bscan;
316 420024 : TableScanDesc sscan = &hscan->rs_base;
317 :
318 : for (;;)
319 : {
320 420024 : CHECK_FOR_INTERRUPTS();
321 :
322 : /* no more entries in the bitmap */
323 420024 : if (!tbm_iterate(&sscan->st.rs_tbmiterator, tbmres))
324 20356 : return InvalidBlockNumber;
325 :
326 : /*
327 : * Ignore any claimed entries past what we think is the end of the
328 : * relation. It may have been extended after the start of our scan (we
329 : * only hold an AccessShareLock, and it could be inserts from this
330 : * backend). We don't apply this optimization in SERIALIZABLE
331 : * isolation though, as we need to examine all invisible tuples
332 : * reachable by the index.
333 : */
334 399668 : if (!IsolationIsSerializable() &&
335 399450 : tbmres->blockno >= hscan->rs_nblocks)
336 0 : continue;
337 :
338 399668 : return tbmres->blockno;
339 : }
340 :
341 : /* not reachable */
342 : Assert(false);
343 : }
344 :
345 : /* ----------------
346 : * initscan - scan code common to heap_beginscan and heap_rescan
347 : * ----------------
348 : */
349 : static void
350 1992800 : initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
351 : {
352 1992800 : ParallelBlockTableScanDesc bpscan = NULL;
353 : bool allow_strat;
354 : bool allow_sync;
355 :
356 : /*
357 : * Determine the number of blocks we have to scan.
358 : *
359 : * It is sufficient to do this once at scan start, since any tuples added
360 : * while the scan is in progress will be invisible to my snapshot anyway.
361 : * (That is not true when using a non-MVCC snapshot. However, we couldn't
362 : * guarantee to return tuples added after scan start anyway, since they
363 : * might go into pages we already scanned. To guarantee consistent
364 : * results for a non-MVCC snapshot, the caller must hold some higher-level
365 : * lock that ensures the interesting tuple(s) won't change.)
366 : */
367 1992800 : if (scan->rs_base.rs_parallel != NULL)
368 : {
369 4016 : bpscan = (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
370 4016 : scan->rs_nblocks = bpscan->phs_nblocks;
371 : }
372 : else
373 1988784 : scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_base.rs_rd);
374 :
375 : /*
376 : * If the table is large relative to NBuffers, use a bulk-read access
377 : * strategy and enable synchronized scanning (see syncscan.c). Although
378 : * the thresholds for these features could be different, we make them the
379 : * same so that there are only two behaviors to tune rather than four.
380 : * (However, some callers need to be able to disable one or both of these
381 : * behaviors, independently of the size of the table; also there is a GUC
382 : * variable that can disable synchronized scanning.)
383 : *
384 : * Note that table_block_parallelscan_initialize has a very similar test;
385 : * if you change this, consider changing that one, too.
386 : */
387 1992796 : if (!RelationUsesLocalBuffers(scan->rs_base.rs_rd) &&
388 1978412 : scan->rs_nblocks > NBuffers / 4)
389 : {
390 28072 : allow_strat = (scan->rs_base.rs_flags & SO_ALLOW_STRAT) != 0;
391 28072 : allow_sync = (scan->rs_base.rs_flags & SO_ALLOW_SYNC) != 0;
392 : }
393 : else
394 1964724 : allow_strat = allow_sync = false;
395 :
396 1992796 : if (allow_strat)
397 : {
398 : /* During a rescan, keep the previous strategy object. */
399 25468 : if (scan->rs_strategy == NULL)
400 25102 : scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
401 : }
402 : else
403 : {
404 1967328 : if (scan->rs_strategy != NULL)
405 0 : FreeAccessStrategy(scan->rs_strategy);
406 1967328 : scan->rs_strategy = NULL;
407 : }
408 :
409 1992796 : if (scan->rs_base.rs_parallel != NULL)
410 : {
411 : /* For parallel scan, believe whatever ParallelTableScanDesc says. */
412 4016 : if (scan->rs_base.rs_parallel->phs_syncscan)
413 4 : scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
414 : else
415 4012 : scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
416 : }
417 1988780 : else if (keep_startblock)
418 : {
419 : /*
420 : * When rescanning, we want to keep the previous startblock setting,
421 : * so that rewinding a cursor doesn't generate surprising results.
422 : * Reset the active syncscan setting, though.
423 : */
424 1223472 : if (allow_sync && synchronize_seqscans)
425 100 : scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
426 : else
427 1223372 : scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
428 : }
429 765308 : else if (allow_sync && synchronize_seqscans)
430 : {
431 144 : scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
432 144 : scan->rs_startblock = ss_get_location(scan->rs_base.rs_rd, scan->rs_nblocks);
433 : }
434 : else
435 : {
436 765164 : scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
437 765164 : scan->rs_startblock = 0;
438 : }
439 :
440 1992796 : scan->rs_numblocks = InvalidBlockNumber;
441 1992796 : scan->rs_inited = false;
442 1992796 : scan->rs_ctup.t_data = NULL;
443 1992796 : ItemPointerSetInvalid(&scan->rs_ctup.t_self);
444 1992796 : scan->rs_cbuf = InvalidBuffer;
445 1992796 : scan->rs_cblock = InvalidBlockNumber;
446 1992796 : scan->rs_ntuples = 0;
447 1992796 : scan->rs_cindex = 0;
448 :
449 : /*
450 : * Initialize to ForwardScanDirection because it is most common and
451 : * because heap scans go forward before going backward (e.g. CURSORs).
452 : */
453 1992796 : scan->rs_dir = ForwardScanDirection;
454 1992796 : scan->rs_prefetch_block = InvalidBlockNumber;
455 :
456 : /* page-at-a-time fields are always invalid when not rs_inited */
457 :
458 : /*
459 : * copy the scan key, if appropriate
460 : */
461 1992796 : if (key != NULL && scan->rs_base.rs_nkeys > 0)
462 441744 : memcpy(scan->rs_base.rs_key, key, scan->rs_base.rs_nkeys * sizeof(ScanKeyData));
463 :
464 : /*
465 : * Currently, we only have a stats counter for sequential heap scans (but
466 : * e.g. for bitmap scans the underlying bitmap index scans will be counted,
467 : * and for sample scans we update stats for tuple fetches).
468 : */
469 1992796 : if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN)
470 1952260 : pgstat_count_heap_scan(scan->rs_base.rs_rd);
471 1992796 : }
472 :
473 : /*
474 : * heap_setscanlimits - restrict range of a heapscan
475 : *
476 : * startBlk is the page to start at
477 : * numBlks is number of pages to scan (InvalidBlockNumber means "all")
478 : */
479 : void
480 5614 : heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
481 : {
482 5614 : HeapScanDesc scan = (HeapScanDesc) sscan;
483 :
484 : Assert(!scan->rs_inited); /* else too late to change */
485 : /* else rs_startblock is significant */
486 : Assert(!(scan->rs_base.rs_flags & SO_ALLOW_SYNC));
487 :
488 : /* Check startBlk is valid (but allow case of zero blocks...) */
489 : Assert(startBlk == 0 || startBlk < scan->rs_nblocks);
490 :
491 5614 : scan->rs_startblock = startBlk;
492 5614 : scan->rs_numblocks = numBlks;
493 5614 : }
494 :
495 : /*
496 : * Per-tuple loop for heap_prepare_pagescan(). Pulled out so it can be called
497 : * multiple times, with constant arguments for all_visible,
498 : * check_serializable.
499 : */
500 : pg_attribute_always_inline
501 : static int
502 5536742 : page_collect_tuples(HeapScanDesc scan, Snapshot snapshot,
503 : Page page, Buffer buffer,
504 : BlockNumber block, int lines,
505 : bool all_visible, bool check_serializable)
506 : {
507 5536742 : int ntup = 0;
508 : OffsetNumber lineoff;
509 :
510 275685484 : for (lineoff = FirstOffsetNumber; lineoff <= lines; lineoff++)
511 : {
512 270148758 : ItemId lpp = PageGetItemId(page, lineoff);
513 : HeapTupleData loctup;
514 : bool valid;
515 :
516 270148758 : if (!ItemIdIsNormal(lpp))
517 54070600 : continue;
518 :
519 216078158 : loctup.t_data = (HeapTupleHeader) PageGetItem(page, lpp);
520 216078158 : loctup.t_len = ItemIdGetLength(lpp);
521 216078158 : loctup.t_tableOid = RelationGetRelid(scan->rs_base.rs_rd);
522 216078158 : ItemPointerSet(&(loctup.t_self), block, lineoff);
523 :
524 216078158 : if (all_visible)
525 88088894 : valid = true;
526 : else
527 127989264 : valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
528 :
529 216078158 : if (check_serializable)
530 2818 : HeapCheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
531 : &loctup, buffer, snapshot);
532 :
533 216078142 : if (valid)
534 : {
535 200667898 : scan->rs_vistuples[ntup] = lineoff;
536 200667898 : ntup++;
537 : }
538 : }
539 :
540 : Assert(ntup <= MaxHeapTuplesPerPage);
541 :
542 5536726 : return ntup;
543 : }
544 :
545 : /*
546 : * heap_prepare_pagescan - Prepare current scan page to be scanned in pagemode
547 : *
548 : * Preparation currently consists of 1. pruning the scan's rs_cbuf page, and
549 : * 2. filling the rs_vistuples[] array with the OffsetNumbers of visible tuples.
550 : */
551 : void
552 5536742 : heap_prepare_pagescan(TableScanDesc sscan)
553 : {
554 5536742 : HeapScanDesc scan = (HeapScanDesc) sscan;
555 5536742 : Buffer buffer = scan->rs_cbuf;
556 5536742 : BlockNumber block = scan->rs_cblock;
557 : Snapshot snapshot;
558 : Page page;
559 : int lines;
560 : bool all_visible;
561 : bool check_serializable;
562 :
563 : Assert(BufferGetBlockNumber(buffer) == block);
564 :
565 : /* ensure we're not accidentally being used when not in pagemode */
566 : Assert(scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE);
567 5536742 : snapshot = scan->rs_base.rs_snapshot;
568 :
569 : /*
570 : * Prune and repair fragmentation for the whole page, if possible.
571 : */
572 5536742 : heap_page_prune_opt(scan->rs_base.rs_rd, buffer);
573 :
574 : /*
575 : * We must hold share lock on the buffer content while examining tuple
576 : * visibility. Afterwards, however, the tuples we have found to be
577 : * visible are guaranteed good as long as we hold the buffer pin.
578 : */
579 5536742 : LockBuffer(buffer, BUFFER_LOCK_SHARE);
580 :
581 5536742 : page = BufferGetPage(buffer);
582 5536742 : lines = PageGetMaxOffsetNumber(page);
583 :
584 : /*
585 : * If the all-visible flag indicates that all tuples on the page are
586 : * visible to everyone, we can skip the per-tuple visibility tests.
587 : *
588 : * Note: In hot standby, a tuple that's already visible to all
589 : * transactions on the primary might still be invisible to a read-only
590 : * transaction in the standby. We partly handle this problem by tracking
591 : * the minimum xmin of visible tuples as the cut-off XID while marking a
592 : * page all-visible on the primary and WAL log that along with the
593 : * visibility map SET operation. In hot standby, we wait for (or abort)
594 : * all transactions that potentially might not see one or more tuples on
595 : * the page. That's how index-only scans work fine in hot standby. A
596 : * crucial difference between index-only scans and heap scans is that the
597 : * index-only scan completely relies on the visibility map, whereas a heap
598 : * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
599 : * the page-level flag can be trusted in the same way, because it might
600 : * get propagated somehow without being explicitly WAL-logged, e.g. via a
601 : * full page write. Until we can prove that beyond doubt, let's check each
602 : * tuple for visibility the hard way.
603 : */
604 5536742 : all_visible = PageIsAllVisible(page) && !snapshot->takenDuringRecovery;
605 : check_serializable =
606 5536742 : CheckForSerializableConflictOutNeeded(scan->rs_base.rs_rd, snapshot);
607 :
608 : /*
609 : * We call page_collect_tuples() with constant arguments, to get the
610 : * compiler to constant fold the constant arguments. Separate calls with
611 : * constant arguments, rather than variables, are needed on several
612 : * compilers to actually perform constant folding.
613 : */
614 5536742 : if (likely(all_visible))
615 : {
616 2161796 : if (likely(!check_serializable))
617 2161796 : scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
618 : block, lines, true, false);
619 : else
620 0 : scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
621 : block, lines, true, true);
622 : }
623 : else
624 : {
625 3374946 : if (likely(!check_serializable))
626 3373700 : scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
627 : block, lines, false, false);
628 : else
629 1246 : scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
630 : block, lines, false, true);
631 : }
632 :
633 5536726 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
634 5536726 : }
635 :
636 : /*
637 : * heap_fetch_next_buffer - read and pin the next block from MAIN_FORKNUM.
638 : *
639 : * Read the next block of the scan relation from the read stream and save it
640 : * in the scan descriptor. It is already pinned.
641 : */
642 : static inline void
643 7343540 : heap_fetch_next_buffer(HeapScanDesc scan, ScanDirection dir)
644 : {
645 : Assert(scan->rs_read_stream);
646 :
647 : /* release previous scan buffer, if any */
648 7343540 : if (BufferIsValid(scan->rs_cbuf))
649 : {
650 5390628 : ReleaseBuffer(scan->rs_cbuf);
651 5390628 : scan->rs_cbuf = InvalidBuffer;
652 : }
653 :
654 : /*
655 : * Be sure to check for interrupts at least once per page. Checks at
656 : * higher code levels won't be able to stop a seqscan that encounters many
657 : * pages' worth of consecutive dead tuples.
658 : */
659 7343540 : CHECK_FOR_INTERRUPTS();
660 :
661 : /*
662 : * If the scan direction is changing, reset the prefetch block to the
663 : * current block. Otherwise, we will incorrectly prefetch the blocks
664 : * between the prefetch block and the current block again before
665 : * prefetching blocks in the new, correct scan direction.
666 : */
667 7343536 : if (unlikely(scan->rs_dir != dir))
668 : {
669 154 : scan->rs_prefetch_block = scan->rs_cblock;
670 154 : read_stream_reset(scan->rs_read_stream);
671 : }
672 :
673 7343536 : scan->rs_dir = dir;
674 :
675 7343536 : scan->rs_cbuf = read_stream_next_buffer(scan->rs_read_stream, NULL);
676 7343486 : if (BufferIsValid(scan->rs_cbuf))
677 5717892 : scan->rs_cblock = BufferGetBlockNumber(scan->rs_cbuf);
678 7343486 : }
679 :
680 : /*
681 : * heapgettup_initial_block - return the first BlockNumber to scan
682 : *
683 : * Returns InvalidBlockNumber when there are no blocks to scan. This can
684 : * occur with empty tables and in parallel scans when parallel workers claim
685 : * all of the pages before we get a chance to claim our first one.
686 : */
687 : static pg_noinline BlockNumber
688 1949916 : heapgettup_initial_block(HeapScanDesc scan, ScanDirection dir)
689 : {
690 : Assert(!scan->rs_inited);
691 : Assert(scan->rs_base.rs_parallel == NULL);
692 :
693 : /* When there are no pages to scan, return InvalidBlockNumber */
694 1949916 : if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
695 945206 : return InvalidBlockNumber;
696 :
697 1004710 : if (ScanDirectionIsForward(dir))
698 : {
699 1004646 : return scan->rs_startblock;
700 : }
701 : else
702 : {
703 : /*
704 : * Disable reporting to syncscan logic in a backwards scan; it's not
705 : * very likely anyone else is doing the same thing at the same time,
706 : * and much more likely that we'll just bollix things for forward
707 : * scanners.
708 : */
709 64 : scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
710 :
711 : /*
712 : * Start from last page of the scan. Ensure we take into account
713 : * rs_numblocks if it's been adjusted by heap_setscanlimits().
714 : */
715 64 : if (scan->rs_numblocks != InvalidBlockNumber)
716 6 : return (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
717 :
718 58 : if (scan->rs_startblock > 0)
719 0 : return scan->rs_startblock - 1;
720 :
721 58 : return scan->rs_nblocks - 1;
722 : }
723 : }
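As a worked example of the backward-scan branch above (illustrative numbers only): with rs_startblock = 6, rs_numblocks = 4 and rs_nblocks = 8, the limited range covers blocks 6, 7, 0, 1, so a backward scan starts at (6 + 4 - 1) % 8 = 1, the last block of that range after wraparound.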
724 :
725 :
726 : /*
727 : * heapgettup_start_page - helper function for heapgettup()
728 : *
729 : * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
730 : * to the number of tuples on this page. Also set *lineoff to the first
731 : * offset to scan with forward scans getting the first offset and backward
732 : * getting the final offset on the page.
733 : */
734 : static Page
735 189704 : heapgettup_start_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
736 : OffsetNumber *lineoff)
737 : {
738 : Page page;
739 :
740 : Assert(scan->rs_inited);
741 : Assert(BufferIsValid(scan->rs_cbuf));
742 :
743 : /* Caller is responsible for ensuring buffer is locked if needed */
744 189704 : page = BufferGetPage(scan->rs_cbuf);
745 :
746 189704 : *linesleft = PageGetMaxOffsetNumber(page) - FirstOffsetNumber + 1;
747 :
748 189704 : if (ScanDirectionIsForward(dir))
749 189704 : *lineoff = FirstOffsetNumber;
750 : else
751 0 : *lineoff = (OffsetNumber) (*linesleft);
752 :
753 : /* lineoff now references the physically previous or next tid */
754 189704 : return page;
755 : }
756 :
757 :
758 : /*
759 : * heapgettup_continue_page - helper function for heapgettup()
760 : *
761 : * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
762 : * to the number of tuples left to scan on this page. Also set *lineoff to
763 : * the next offset to scan according to the ScanDirection in 'dir'.
764 : */
765 : static inline Page
766 15692642 : heapgettup_continue_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
767 : OffsetNumber *lineoff)
768 : {
769 : Page page;
770 :
771 : Assert(scan->rs_inited);
772 : Assert(BufferIsValid(scan->rs_cbuf));
773 :
774 : /* Caller is responsible for ensuring buffer is locked if needed */
775 15692642 : page = BufferGetPage(scan->rs_cbuf);
776 :
777 15692642 : if (ScanDirectionIsForward(dir))
778 : {
779 15692642 : *lineoff = OffsetNumberNext(scan->rs_coffset);
780 15692642 : *linesleft = PageGetMaxOffsetNumber(page) - (*lineoff) + 1;
781 : }
782 : else
783 : {
784 : /*
785 : * The previously returned tuple may have been vacuumed since the
786 : * previous scan when we use a non-MVCC snapshot, so we must
787 : * re-establish the lineoff <= PageGetMaxOffsetNumber(page) invariant
788 : */
789 0 : *lineoff = Min(PageGetMaxOffsetNumber(page), OffsetNumberPrev(scan->rs_coffset));
790 0 : *linesleft = *lineoff;
791 : }
792 :
793 : /* lineoff now references the physically previous or next tid */
794 15692642 : return page;
795 : }
796 :
797 : /*
798 : * heapgettup_advance_block - helper for heap_fetch_next_buffer()
799 : *
800 : * Given the current block number, the scan direction, and various information
801 : * contained in the scan descriptor, calculate the BlockNumber to scan next
802 : * and return it. If there are no further blocks to scan, return
803 : * InvalidBlockNumber to indicate this fact to the caller.
804 : *
805 : * This should not be called to determine the initial block number -- only for
806 : * subsequent blocks.
807 : *
808 : * This also adjusts rs_numblocks when a limit has been imposed by
809 : * heap_setscanlimits().
810 : */
811 : static inline BlockNumber
812 5561446 : heapgettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection dir)
813 : {
814 : Assert(scan->rs_base.rs_parallel == NULL);
815 :
816 5561446 : if (likely(ScanDirectionIsForward(dir)))
817 : {
818 5561328 : block++;
819 :
820 : /* wrap back to the start of the heap */
821 5561328 : if (block >= scan->rs_nblocks)
822 802522 : block = 0;
823 :
824 : /*
825 : * Report our new scan position for synchronization purposes. We don't
826 : * do that when moving backwards, however. That would just mess up any
827 : * other forward-moving scanners.
828 : *
829 : * Note: we do this before checking for end of scan so that the final
830 : * state of the position hint is back at the start of the rel. That's
831 : * not strictly necessary, but otherwise when you run the same query
832 : * multiple times the starting position would shift a little bit
833 : * backwards on every invocation, which is confusing. We don't
834 : * guarantee any specific ordering in general, though.
835 : */
836 5561328 : if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
837 22530 : ss_report_location(scan->rs_base.rs_rd, block);
838 :
839 : /* we're done if we're back at where we started */
840 5561328 : if (block == scan->rs_startblock)
841 802440 : return InvalidBlockNumber;
842 :
843 : /* check if the limit imposed by heap_setscanlimits() is met */
844 4758888 : if (scan->rs_numblocks != InvalidBlockNumber)
845 : {
846 4932 : if (--scan->rs_numblocks == 0)
847 3056 : return InvalidBlockNumber;
848 : }
849 :
850 4755832 : return block;
851 : }
852 : else
853 : {
854 : /* we're done if the last block is the start position */
855 118 : if (block == scan->rs_startblock)
856 118 : return InvalidBlockNumber;
857 :
858 : /* check if the limit imposed by heap_setscanlimits() is met */
859 0 : if (scan->rs_numblocks != InvalidBlockNumber)
860 : {
861 0 : if (--scan->rs_numblocks == 0)
862 0 : return InvalidBlockNumber;
863 : }
864 :
865 : /* wrap to the end of the heap when the last page was page 0 */
866 0 : if (block == 0)
867 0 : block = scan->rs_nblocks;
868 :
869 0 : block--;
870 :
871 0 : return block;
872 : }
873 : }
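A minimal standalone sketch of the forward-direction arithmetic above follows; it is not part of heapam.c and omits the syncscan position report and the rs_numblocks limit. Starting anywhere in the relation, the scan wraps past the last block to block 0 and finishes when it arrives back at its starting block.

/*
 * Illustrative sketch only: forward block advancement with wraparound.
 * A scan that started at start_block visits every block of an
 * nblocks-page relation exactly once, then returns InvalidBlockNumber.
 */
static BlockNumber
example_advance_block(BlockNumber block, BlockNumber start_block,
					  BlockNumber nblocks)
{
	block++;
	if (block >= nblocks)		/* wrap back to the start of the heap */
		block = 0;
	if (block == start_block)	/* back where we began: scan is done */
		return InvalidBlockNumber;
	return block;
}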
874 :
875 : /* ----------------
876 : * heapgettup - fetch next heap tuple
877 : *
878 : * Initialize the scan if not already done; then advance to the next
879 : * tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
880 : * or set scan->rs_ctup.t_data = NULL if no more tuples.
881 : *
882 : * Note: the reason nkeys/key are passed separately, even though they are
883 : * kept in the scan descriptor, is that the caller may not want us to check
884 : * the scankeys.
885 : *
886 : * Note: when we fall off the end of the scan in either direction, we
887 : * reset rs_inited. This means that a further request with the same
888 : * scan direction will restart the scan, which is a bit odd, but a
889 : * request with the opposite scan direction will start a fresh scan
890 : * in the proper direction. The latter is required behavior for cursors,
891 : * while the former case is generally undefined behavior in Postgres
892 : * so we don't care too much.
893 : * ----------------
894 : */
895 : static void
896 15733460 : heapgettup(HeapScanDesc scan,
897 : ScanDirection dir,
898 : int nkeys,
899 : ScanKey key)
900 : {
901 15733460 : HeapTuple tuple = &(scan->rs_ctup);
902 : Page page;
903 : OffsetNumber lineoff;
904 : int linesleft;
905 :
906 15733460 : if (likely(scan->rs_inited))
907 : {
908 : /* continue from previously returned page/tuple */
909 15692642 : LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
910 15692642 : page = heapgettup_continue_page(scan, dir, &linesleft, &lineoff);
911 15692642 : goto continue_page;
912 : }
913 :
914 : /*
915 : * advance the scan until we find a qualifying tuple or run out of stuff
916 : * to scan
917 : */
918 : while (true)
919 : {
920 230226 : heap_fetch_next_buffer(scan, dir);
921 :
922 : /* did we run out of blocks to scan? */
923 230226 : if (!BufferIsValid(scan->rs_cbuf))
924 40522 : break;
925 :
926 : Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);
927 :
928 189704 : LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
929 189704 : page = heapgettup_start_page(scan, dir, &linesleft, &lineoff);
930 15882346 : continue_page:
931 :
932 : /*
933 : * Only continue scanning the page while we have lines left.
934 : *
935 : * Note that this protects us from accessing line pointers past
936 : * PageGetMaxOffsetNumber(); both for forward scans when we resume the
937 : * table scan, and for when we start scanning a new page.
938 : */
939 15965640 : for (; linesleft > 0; linesleft--, lineoff += dir)
940 : {
941 : bool visible;
942 15776232 : ItemId lpp = PageGetItemId(page, lineoff);
943 :
944 15776232 : if (!ItemIdIsNormal(lpp))
945 72858 : continue;
946 :
947 15703374 : tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
948 15703374 : tuple->t_len = ItemIdGetLength(lpp);
949 15703374 : ItemPointerSet(&(tuple->t_self), scan->rs_cblock, lineoff);
950 :
951 15703374 : visible = HeapTupleSatisfiesVisibility(tuple,
952 : scan->rs_base.rs_snapshot,
953 : scan->rs_cbuf);
954 :
955 15703374 : HeapCheckForSerializableConflictOut(visible, scan->rs_base.rs_rd,
956 : tuple, scan->rs_cbuf,
957 : scan->rs_base.rs_snapshot);
958 :
959 : /* skip tuples not visible to this snapshot */
960 15703374 : if (!visible)
961 10436 : continue;
962 :
963 : /* skip any tuples that don't match the scan key */
964 15692938 : if (key != NULL &&
965 0 : !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
966 : nkeys, key))
967 0 : continue;
968 :
969 15692938 : LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
970 15692938 : scan->rs_coffset = lineoff;
971 15692938 : return;
972 : }
973 :
974 : /*
975 : * if we get here, it means we've exhausted the items on this page and
976 : * it's time to move to the next.
977 : */
978 189408 : LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
979 : }
980 :
981 : /* end of scan */
982 40522 : if (BufferIsValid(scan->rs_cbuf))
983 0 : ReleaseBuffer(scan->rs_cbuf);
984 :
985 40522 : scan->rs_cbuf = InvalidBuffer;
986 40522 : scan->rs_cblock = InvalidBlockNumber;
987 40522 : scan->rs_prefetch_block = InvalidBlockNumber;
988 40522 : tuple->t_data = NULL;
989 40522 : scan->rs_inited = false;
990 : }
991 :
992 : /* ----------------
993 : * heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
994 : *
995 : * Same API as heapgettup, but used in page-at-a-time mode
996 : *
997 : * The internal logic is much the same as heapgettup's too, but there are some
998 : * differences: we do not take the buffer content lock (that only needs to
999 : * happen inside heap_prepare_pagescan), and we iterate through just the
1000 : * tuples listed in rs_vistuples[] rather than all tuples on the page. Notice
1001 : * that lineindex is 0-based, whereas the corresponding loop variable lineoff in
1002 : * heapgettup is 1-based.
1003 : * ----------------
1004 : */
1005 : static void
1006 99077338 : heapgettup_pagemode(HeapScanDesc scan,
1007 : ScanDirection dir,
1008 : int nkeys,
1009 : ScanKey key)
1010 : {
1011 99077338 : HeapTuple tuple = &(scan->rs_ctup);
1012 : Page page;
1013 : uint32 lineindex;
1014 : uint32 linesleft;
1015 :
1016 99077338 : if (likely(scan->rs_inited))
1017 : {
1018 : /* continue from previously returned page/tuple */
1019 97165244 : page = BufferGetPage(scan->rs_cbuf);
1020 :
1021 97165244 : lineindex = scan->rs_cindex + dir;
1022 97165244 : if (ScanDirectionIsForward(dir))
1023 97164586 : linesleft = scan->rs_ntuples - lineindex;
1024 : else
1025 658 : linesleft = scan->rs_cindex;
1026 : /* lineindex now references the next or previous visible tid */
1027 :
1028 97165244 : goto continue_page;
1029 : }
1030 :
1031 : /*
1032 : * advance the scan until we find a qualifying tuple or run out of stuff
1033 : * to scan
1034 : */
1035 : while (true)
1036 : {
1037 7113314 : heap_fetch_next_buffer(scan, dir);
1038 :
1039 : /* did we run out of blocks to scan? */
1040 7113260 : if (!BufferIsValid(scan->rs_cbuf))
1041 1585072 : break;
1042 :
1043 : Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);
1044 :
1045 : /* prune the page and determine visible tuple offsets */
1046 5528188 : heap_prepare_pagescan((TableScanDesc) scan);
1047 5528172 : page = BufferGetPage(scan->rs_cbuf);
1048 5528172 : linesleft = scan->rs_ntuples;
1049 5528172 : lineindex = ScanDirectionIsForward(dir) ? 0 : linesleft - 1;
1050 :
1051 : /* block is the same for all tuples, set it once outside the loop */
1052 5528172 : ItemPointerSetBlockNumber(&tuple->t_self, scan->rs_cblock);
1053 :
1054 : /* lineindex now references the next or previous visible tid */
1055 102693416 : continue_page:
1056 :
1057 199411448 : for (; linesleft > 0; linesleft--, lineindex += dir)
1058 : {
1059 : ItemId lpp;
1060 : OffsetNumber lineoff;
1061 :
1062 : Assert(lineindex <= scan->rs_ntuples);
1063 194210228 : lineoff = scan->rs_vistuples[lineindex];
1064 194210228 : lpp = PageGetItemId(page, lineoff);
1065 : Assert(ItemIdIsNormal(lpp));
1066 :
1067 194210228 : tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
1068 194210228 : tuple->t_len = ItemIdGetLength(lpp);
1069 194210228 : ItemPointerSetOffsetNumber(&tuple->t_self, lineoff);
1070 :
1071 : /* skip any tuples that don't match the scan key */
1072 194210228 : if (key != NULL &&
1073 97548600 : !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
1074 : nkeys, key))
1075 96718032 : continue;
1076 :
1077 97492196 : scan->rs_cindex = lineindex;
1078 97492196 : return;
1079 : }
1080 : }
1081 :
1082 : /* end of scan */
1083 1585072 : if (BufferIsValid(scan->rs_cbuf))
1084 0 : ReleaseBuffer(scan->rs_cbuf);
1085 1585072 : scan->rs_cbuf = InvalidBuffer;
1086 1585072 : scan->rs_cblock = InvalidBlockNumber;
1087 1585072 : scan->rs_prefetch_block = InvalidBlockNumber;
1088 1585072 : tuple->t_data = NULL;
1089 1585072 : scan->rs_inited = false;
1090 : }
1091 :
1092 :
1093 : /* ----------------------------------------------------------------
1094 : * heap access method interface
1095 : * ----------------------------------------------------------------
1096 : */
1097 :
1098 :
1099 : TableScanDesc
1100 769220 : heap_beginscan(Relation relation, Snapshot snapshot,
1101 : int nkeys, ScanKey key,
1102 : ParallelTableScanDesc parallel_scan,
1103 : uint32 flags)
1104 : {
1105 : HeapScanDesc scan;
1106 :
1107 : /*
1108 : * increment relation ref count while scanning relation
1109 : *
1110 : * This is just to make really sure the relcache entry won't go away while
1111 : * the scan has a pointer to it. Caller should be holding the rel open
1112 : * anyway, so this is redundant in all normal scenarios...
1113 : */
1114 769220 : RelationIncrementReferenceCount(relation);
1115 :
1116 : /*
1117 : * allocate and initialize scan descriptor
1118 : */
1119 769220 : if (flags & SO_TYPE_BITMAPSCAN)
1120 : {
1121 16154 : BitmapHeapScanDesc bscan = palloc(sizeof(BitmapHeapScanDescData));
1122 :
1123 : /*
1124 : * Bitmap Heap scans do not have any fields that a normal Heap Scan
1125 : * does not have, so no special initialization is required here.
1126 : */
1127 16154 : scan = (HeapScanDesc) bscan;
1128 : }
1129 : else
1130 753066 : scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1131 :
1132 769220 : scan->rs_base.rs_rd = relation;
1133 769220 : scan->rs_base.rs_snapshot = snapshot;
1134 769220 : scan->rs_base.rs_nkeys = nkeys;
1135 769220 : scan->rs_base.rs_flags = flags;
1136 769220 : scan->rs_base.rs_parallel = parallel_scan;
1137 769220 : scan->rs_strategy = NULL; /* set in initscan */
1138 769220 : scan->rs_cbuf = InvalidBuffer;
1139 :
1140 : /*
1141 : * Disable page-at-a-time mode if it's not a MVCC-safe snapshot.
1142 : */
1143 769220 : if (!(snapshot && IsMVCCSnapshot(snapshot)))
1144 58078 : scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
1145 :
1146 : /* Check that a historic snapshot is not used for non-catalog tables */
1147 769220 : if (snapshot &&
1148 751964 : IsHistoricMVCCSnapshot(snapshot) &&
1149 1320 : !RelationIsAccessibleInLogicalDecoding(relation))
1150 : {
1151 0 : ereport(ERROR,
1152 : (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
1153 : errmsg("cannot query non-catalog table \"%s\" during logical decoding",
1154 : RelationGetRelationName(relation))));
1155 : }
1156 :
1157 : /*
1158 : * For seqscan and sample scans in a serializable transaction, acquire a
1159 : * predicate lock on the entire relation. This is required not only to
1160 : * lock all the matching tuples, but also to conflict with new insertions
1161 : * into the table. In an indexscan, we take page locks on the index pages
1162 : * covering the range specified in the scan qual, but in a heap scan there
1163 : * is nothing more fine-grained to lock. A bitmap scan is a different
1164 : * story, there we have already scanned the index and locked the index
1165 : * pages covering the predicate. But in that case we still have to lock
1166 : * any matching heap tuples. For sample scan we could optimize the locking
1167 : * to be at least page-level granularity, but we'd need to add per-tuple
1168 : * locking for that.
1169 : */
1170 769220 : if (scan->rs_base.rs_flags & (SO_TYPE_SEQSCAN | SO_TYPE_SAMPLESCAN))
1171 : {
1172 : /*
1173 : * Ensure a missing snapshot is noticed reliably, even if the
1174 : * isolation mode means predicate locking isn't performed (and
1175 : * therefore the snapshot isn't used here).
1176 : */
1177 : Assert(snapshot);
1178 733170 : PredicateLockRelation(relation, snapshot);
1179 : }
1180 :
1181 : /* we only need to set this up once */
1182 769220 : scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1183 :
1184 : /*
1185 : * Allocate memory to keep track of page allocation for parallel workers
1186 : * when doing a parallel scan.
1187 : */
1188 769220 : if (parallel_scan != NULL)
1189 3908 : scan->rs_parallelworkerdata = palloc(sizeof(ParallelBlockTableScanWorkerData));
1190 : else
1191 765312 : scan->rs_parallelworkerdata = NULL;
1192 :
1193 : /*
1194 : * we do this here instead of in initscan() because heap_rescan also calls
1195 : * initscan() and we don't want to allocate memory again
1196 : */
1197 769220 : if (nkeys > 0)
1198 441744 : scan->rs_base.rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1199 : else
1200 327476 : scan->rs_base.rs_key = NULL;
1201 :
1202 769220 : initscan(scan, key, false);
1203 :
1204 769216 : scan->rs_read_stream = NULL;
1205 :
1206 : /*
1207 : * Set up a read stream for sequential scans and TID range scans. This
1208 : * should be done after initscan() because initscan() allocates the
1209 : * BufferAccessStrategy object passed to the read stream API.
1210 : */
1211 769216 : if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN ||
1212 36196 : scan->rs_base.rs_flags & SO_TYPE_TIDRANGESCAN)
1213 734874 : {
1214 : ReadStreamBlockNumberCB cb;
1215 :
1216 734874 : if (scan->rs_base.rs_parallel)
1217 3908 : cb = heap_scan_stream_read_next_parallel;
1218 : else
1219 730966 : cb = heap_scan_stream_read_next_serial;
1220 :
1221 : /* ---
1222 : * It is safe to use batchmode as the only locks taken by `cb`
1223 : * are never taken while waiting for IO:
1224 : * - SyncScanLock is used in the non-parallel case
1225 : * - in the parallel case, only spinlocks and atomics are used
1226 : * ---
1227 : */
1228 734874 : scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_SEQUENTIAL |
1229 : READ_STREAM_USE_BATCHING,
1230 : scan->rs_strategy,
1231 : scan->rs_base.rs_rd,
1232 : MAIN_FORKNUM,
1233 : cb,
1234 : scan,
1235 : 0);
1236 : }
1237 34342 : else if (scan->rs_base.rs_flags & SO_TYPE_BITMAPSCAN)
1238 : {
1239 16154 : scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_DEFAULT |
1240 : READ_STREAM_USE_BATCHING,
1241 : scan->rs_strategy,
1242 : scan->rs_base.rs_rd,
1243 : MAIN_FORKNUM,
1244 : bitmapheap_stream_read_next,
1245 : scan,
1246 : sizeof(TBMIterateResult));
1247 : }
1248 :
1249 :
1250 769216 : return (TableScanDesc) scan;
1251 : }
1252 :
1253 : void
1254 1223580 : heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
1255 : bool allow_strat, bool allow_sync, bool allow_pagemode)
1256 : {
1257 1223580 : HeapScanDesc scan = (HeapScanDesc) sscan;
1258 :
1259 1223580 : if (set_params)
1260 : {
1261 30 : if (allow_strat)
1262 30 : scan->rs_base.rs_flags |= SO_ALLOW_STRAT;
1263 : else
1264 0 : scan->rs_base.rs_flags &= ~SO_ALLOW_STRAT;
1265 :
1266 30 : if (allow_sync)
1267 12 : scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
1268 : else
1269 18 : scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
1270 :
1271 30 : if (allow_pagemode && scan->rs_base.rs_snapshot &&
1272 30 : IsMVCCSnapshot(scan->rs_base.rs_snapshot))
1273 30 : scan->rs_base.rs_flags |= SO_ALLOW_PAGEMODE;
1274 : else
1275 0 : scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
1276 : }
1277 :
1278 : /*
1279 : * unpin scan buffers
1280 : */
1281 1223580 : if (BufferIsValid(scan->rs_cbuf))
1282 : {
1283 3162 : ReleaseBuffer(scan->rs_cbuf);
1284 3162 : scan->rs_cbuf = InvalidBuffer;
1285 : }
1286 :
1287 : /*
1288 : * SO_TYPE_BITMAPSCAN would be cleaned up here, but it does not hold any
1289 : * additional data vs a normal HeapScan
1290 : */
1291 :
1292 : /*
1293 : * The read stream is reset on rescan. This must be done before
1294 : * initscan(), as some state referred to by read_stream_reset() is reset
1295 : * in initscan().
1296 : */
1297 1223580 : if (scan->rs_read_stream)
1298 1223544 : read_stream_reset(scan->rs_read_stream);
1299 :
1300 : /*
1301 : * reinitialize scan descriptor
1302 : */
1303 1223580 : initscan(scan, key, true);
1304 1223580 : }
1305 :
1306 : void
1307 764552 : heap_endscan(TableScanDesc sscan)
1308 : {
1309 764552 : HeapScanDesc scan = (HeapScanDesc) sscan;
1310 :
1311 : /* Note: no locking manipulations needed */
1312 :
1313 : /*
1314 : * unpin scan buffers
1315 : */
1316 764552 : if (BufferIsValid(scan->rs_cbuf))
1317 320888 : ReleaseBuffer(scan->rs_cbuf);
1318 :
1319 : /*
1320 : * Must free the read stream before freeing the BufferAccessStrategy.
1321 : */
1322 764552 : if (scan->rs_read_stream)
1323 746470 : read_stream_end(scan->rs_read_stream);
1324 :
1325 : /*
1326 : * decrement relation reference count and free scan descriptor storage
1327 : */
1328 764552 : RelationDecrementReferenceCount(scan->rs_base.rs_rd);
1329 :
1330 764552 : if (scan->rs_base.rs_key)
1331 441686 : pfree(scan->rs_base.rs_key);
1332 :
1333 764552 : if (scan->rs_strategy != NULL)
1334 25084 : FreeAccessStrategy(scan->rs_strategy);
1335 :
1336 764552 : if (scan->rs_parallelworkerdata != NULL)
1337 3908 : pfree(scan->rs_parallelworkerdata);
1338 :
1339 764552 : if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
1340 80380 : UnregisterSnapshot(scan->rs_base.rs_snapshot);
1341 :
1342 764552 : pfree(scan);
1343 764552 : }
1344 :
1345 : HeapTuple
1346 20127804 : heap_getnext(TableScanDesc sscan, ScanDirection direction)
1347 : {
1348 20127804 : HeapScanDesc scan = (HeapScanDesc) sscan;
1349 :
1350 : /*
1351 : * This is still widely used directly, without going through table AM, so
1352 : * add a safety check. It's possible we should, at a later point,
1353 : * downgrade this to an assert. The reason for checking the AM routine,
1354 : * rather than the AM oid, is that this allows to write regression tests
1355 : * that create another AM reusing the heap handler.
1356 : */
1357 20127804 : if (unlikely(sscan->rs_rd->rd_tableam != GetHeapamTableAmRoutine()))
1358 0 : ereport(ERROR,
1359 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1360 : errmsg_internal("only heap AM is supported")));
1361 :
1362 : /*
1363 : * We don't expect direct calls to heap_getnext with valid CheckXidAlive
1364 : * for catalog or regular tables. See detailed comments in xact.c where
1365 : * these variables are declared. Normally we have such a check at the
1366 : * tableam API level, but this is called from many places so we need to
1367 : * ensure it here.
1368 : */
1369 20127804 : if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
1370 0 : elog(ERROR, "unexpected heap_getnext call during logical decoding");
1371 :
1372 : /* Note: no locking manipulations needed */
1373 :
1374 20127804 : if (scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
1375 5366422 : heapgettup_pagemode(scan, direction,
1376 5366422 : scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1377 : else
1378 14761382 : heapgettup(scan, direction,
1379 14761382 : scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1380 :
1381 20127804 : if (scan->rs_ctup.t_data == NULL)
1382 132860 : return NULL;
1383 :
1384 : /*
1385 : * if we get here it means we have a new current scan tuple, so point to
1386 : * the proper return buffer and return the tuple.
1387 : */
1388 :
1389 19994944 : pgstat_count_heap_getnext(scan->rs_base.rs_rd);
1390 :
1391 19994944 : return &scan->rs_ctup;
1392 : }
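For orientation, a hypothetical caller of the routines above might look like the sketch below. It is not part of heapam.c; real callers normally go through the table AM layer, and the relation and snapshot are assumed to be opened and registered elsewhere.

/*
 * Illustrative sketch only: a plain forward sequential scan using the
 * heap_beginscan / heap_getnext / heap_endscan interface routines.
 */
static void
example_scan_relation(Relation rel, Snapshot snapshot)
{
	TableScanDesc scan;
	HeapTuple	tuple;

	scan = heap_beginscan(rel, snapshot, 0, NULL, NULL,
						  SO_TYPE_SEQSCAN | SO_ALLOW_STRAT |
						  SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE);

	while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		/* tuple points into a pinned buffer; use it before the next call */
	}

	heap_endscan(scan);
}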
1393 :
1394 : bool
1395 94674206 : heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
1396 : {
1397 94674206 : HeapScanDesc scan = (HeapScanDesc) sscan;
1398 :
1399 : /* Note: no locking manipulations needed */
1400 :
1401 94674206 : if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1402 93702128 : heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1403 : else
1404 972078 : heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1405 :
1406 94674152 : if (scan->rs_ctup.t_data == NULL)
1407 : {
1408 1492640 : ExecClearTuple(slot);
1409 1492640 : return false;
1410 : }
1411 :
1412 : /*
1413 : * if we get here it means we have a new current scan tuple, so point to
1414 : * the proper return buffer and return the tuple.
1415 : */
1416 :
1417 93181512 : pgstat_count_heap_getnext(scan->rs_base.rs_rd);
1418 :
1419 93181512 : ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
1420 : scan->rs_cbuf);
1421 93181512 : return true;
1422 : }
1423 :
1424 : void
1425 1920 : heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
1426 : ItemPointer maxtid)
1427 : {
1428 1920 : HeapScanDesc scan = (HeapScanDesc) sscan;
1429 : BlockNumber startBlk;
1430 : BlockNumber numBlks;
1431 : ItemPointerData highestItem;
1432 : ItemPointerData lowestItem;
1433 :
1434 : /*
1435 : * For relations without any pages, we can simply leave the TID range
1436 : * unset. There will be no tuples to scan, therefore no tuples outside
1437 : * the given TID range.
1438 : */
1439 1920 : if (scan->rs_nblocks == 0)
1440 48 : return;
1441 :
1442 : /*
1443 : * Set up some ItemPointers which point to the first and last possible
1444 : * tuples in the heap.
1445 : */
1446 1908 : ItemPointerSet(&highestItem, scan->rs_nblocks - 1, MaxOffsetNumber);
1447 1908 : ItemPointerSet(&lowestItem, 0, FirstOffsetNumber);
1448 :
1449 : /*
1450 : * If the given maximum TID is below the highest possible TID in the
1451 : * relation, then restrict the range to that, otherwise we scan to the end
1452 : * of the relation.
1453 : */
1454 1908 : if (ItemPointerCompare(maxtid, &highestItem) < 0)
1455 140 : ItemPointerCopy(maxtid, &highestItem);
1456 :
1457 : /*
1458 : * If the given minimum TID is above the lowest possible TID in the
1459 : * relation, then restrict the range to only scan for TIDs above that.
1460 : */
1461 1908 : if (ItemPointerCompare(mintid, &lowestItem) > 0)
1462 1762 : ItemPointerCopy(mintid, &lowestItem);
1463 :
1464 : /*
1465 : * Check for an empty range and protect against would-be negative results
1466 : * from the numBlks calculation below.
1467 : */
1468 1908 : if (ItemPointerCompare(&highestItem, &lowestItem) < 0)
1469 : {
1470 : /* Set an empty range of blocks to scan */
1471 36 : heap_setscanlimits(sscan, 0, 0);
1472 36 : return;
1473 : }
1474 :
1475 : /*
1476 : * Calculate the first block and the number of blocks we must scan. We
1477 : * could be more aggressive here and perform some more validation to try
1478 : * and further narrow the scope of blocks to scan by checking if the
1479 : * lowestItem has an offset above MaxOffsetNumber. In this case, we could
1480 : * advance startBlk by one. Likewise, if highestItem has an offset of 0
1481 : * we could scan one fewer blocks. However, such an optimization does not
1482 : * seem worth troubling over, currently.
1483 : */
1484 1872 : startBlk = ItemPointerGetBlockNumberNoCheck(&lowestItem);
1485 :
1486 1872 : numBlks = ItemPointerGetBlockNumberNoCheck(&highestItem) -
1487 1872 : ItemPointerGetBlockNumberNoCheck(&lowestItem) + 1;
1488 :
1489 : /* Set the start block and number of blocks to scan */
1490 1872 : heap_setscanlimits(sscan, startBlk, numBlks);
1491 :
1492 : /* Finally, set the TID range in sscan */
1493 1872 : ItemPointerCopy(&lowestItem, &sscan->st.tidrange.rs_mintid);
1494 1872 : ItemPointerCopy(&highestItem, &sscan->st.tidrange.rs_maxtid);
1495 : }
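As a worked example of the block-range arithmetic above (illustrative TIDs only): with mintid = (3,5) and maxtid = (7,2) on a 10-block relation, lowestItem and highestItem are clamped to those values, startBlk = 3 and numBlks = 7 - 3 + 1 = 5, so heap_setscanlimits() restricts the scan to blocks 3 through 7.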
1496 :
1497 : bool
1498 8602 : heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
1499 : TupleTableSlot *slot)
1500 : {
1501 8602 : HeapScanDesc scan = (HeapScanDesc) sscan;
1502 8602 : ItemPointer mintid = &sscan->st.tidrange.rs_mintid;
1503 8602 : ItemPointer maxtid = &sscan->st.tidrange.rs_maxtid;
1504 :
1505 : /* Note: no locking manipulations needed */
1506 : for (;;)
1507 : {
1508 8788 : if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1509 8788 : heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1510 : else
1511 0 : heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1512 :
1513 8772 : if (scan->rs_ctup.t_data == NULL)
1514 : {
1515 94 : ExecClearTuple(slot);
1516 94 : return false;
1517 : }
1518 :
1519 : /*
1520 : * heap_set_tidrange will have used heap_setscanlimits to limit the
1521 : * range of pages we scan to only ones that can contain the TID range
1522 : * we're scanning for. Here we must filter out any tuples from these
1523 : * pages that are outside of that range.
1524 : */
1525 8678 : if (ItemPointerCompare(&scan->rs_ctup.t_self, mintid) < 0)
1526 : {
1527 186 : ExecClearTuple(slot);
1528 :
1529 : /*
1530 : * When scanning backwards, the TIDs will be in descending order.
1531 : * Future tuples in this direction will be lower still, so we can
1532 : * just return false to indicate there will be no more tuples.
1533 : */
1534 186 : if (ScanDirectionIsBackward(direction))
1535 0 : return false;
1536 :
1537 186 : continue;
1538 : }
1539 :
1540 : /*
1541 : * Likewise for the final page, we must filter out TIDs greater than
1542 : * maxtid.
1543 : */
1544 8492 : if (ItemPointerCompare(&scan->rs_ctup.t_self, maxtid) > 0)
1545 : {
1546 76 : ExecClearTuple(slot);
1547 :
1548 : /*
1549 : * When scanning forward, the TIDs will be in ascending order.
1550 : * Future tuples in this direction will be higher still, so we can
1551 : * just return false to indicate there will be no more tuples.
1552 : */
1553 76 : if (ScanDirectionIsForward(direction))
1554 76 : return false;
1555 0 : continue;
1556 : }
1557 :
1558 8416 : break;
1559 : }
1560 :
1561 : /*
1562 : * if we get here it means we have a new current scan tuple, so point to
1563 : * the proper return buffer and return the tuple.
1564 : */
1565 8416 : pgstat_count_heap_getnext(scan->rs_base.rs_rd);
1566 :
1567 8416 : ExecStoreBufferHeapTuple(&scan->rs_ctup, slot, scan->rs_cbuf);
1568 8416 : return true;
1569 : }
1570 :
1571 : /*
1572 : * heap_fetch - retrieve tuple with given tid
1573 : *
1574 : * On entry, tuple->t_self is the TID to fetch. We pin the buffer holding
1575 : * the tuple, fill in the remaining fields of *tuple, and check the tuple
1576 : * against the specified snapshot.
1577 : *
1578 : * If successful (tuple found and passes snapshot time qual), then *userbuf
1579 : * is set to the buffer holding the tuple and true is returned. The caller
1580 : * must unpin the buffer when done with the tuple.
1581 : *
1582 : * If the tuple is not found (ie, item number references a deleted slot),
1583 : * then tuple->t_data is set to NULL, *userbuf is set to InvalidBuffer,
1584 : * and false is returned.
1585 : *
1586 : * If the tuple is found but fails the time qual check, then the behavior
1587 : * depends on the keep_buf parameter. If keep_buf is false, the results
1588 : * are the same as for the tuple-not-found case. If keep_buf is true,
1589 : * then tuple->t_data and *userbuf are returned as for the success case,
1590 : * and again the caller must unpin the buffer; but false is returned.
1591 : *
1592 : * heap_fetch does not follow HOT chains: only the exact TID requested will
1593 : * be fetched.
1594 : *
1595 : * It is somewhat inconsistent that we ereport() on invalid block number but
1596 : * return false on invalid item number. There are a couple of reasons though.
1597 : * One is that the caller can relatively easily check the block number for
1598 : * validity, but cannot check the item number without reading the page
1599 : * himself. Another is that when we are following a t_ctid link, we can be
1600 : * reasonably confident that the page number is valid (since VACUUM shouldn't
1601 : * truncate off the destination page without having killed the referencing
1602 : * tuple first), but the item number might well not be good.
1603 : */
1604 : bool
1605 355924 : heap_fetch(Relation relation,
1606 : Snapshot snapshot,
1607 : HeapTuple tuple,
1608 : Buffer *userbuf,
1609 : bool keep_buf)
1610 : {
1611 355924 : ItemPointer tid = &(tuple->t_self);
1612 : ItemId lp;
1613 : Buffer buffer;
1614 : Page page;
1615 : OffsetNumber offnum;
1616 : bool valid;
1617 :
1618 : /*
1619 : * Fetch and pin the appropriate page of the relation.
1620 : */
1621 355924 : buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1622 :
1623 : /*
1624 : * Need share lock on buffer to examine tuple commit status.
1625 : */
1626 355908 : LockBuffer(buffer, BUFFER_LOCK_SHARE);
1627 355908 : page = BufferGetPage(buffer);
1628 :
1629 : /*
1630 :  * We'd better check for out-of-range offnum in case a VACUUM has run
1631 :  * since the TID was obtained.
1632 : */
1633 355908 : offnum = ItemPointerGetOffsetNumber(tid);
1634 355908 : if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1635 : {
1636 6 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1637 6 : ReleaseBuffer(buffer);
1638 6 : *userbuf = InvalidBuffer;
1639 6 : tuple->t_data = NULL;
1640 6 : return false;
1641 : }
1642 :
1643 : /*
1644 : * get the item line pointer corresponding to the requested tid
1645 : */
1646 355902 : lp = PageGetItemId(page, offnum);
1647 :
1648 : /*
1649 : * Must check for deleted tuple.
1650 : */
1651 355902 : if (!ItemIdIsNormal(lp))
1652 : {
1653 690 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1654 690 : ReleaseBuffer(buffer);
1655 690 : *userbuf = InvalidBuffer;
1656 690 : tuple->t_data = NULL;
1657 690 : return false;
1658 : }
1659 :
1660 : /*
1661 : * fill in *tuple fields
1662 : */
1663 355212 : tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1664 355212 : tuple->t_len = ItemIdGetLength(lp);
1665 355212 : tuple->t_tableOid = RelationGetRelid(relation);
1666 :
1667 : /*
1668 : * check tuple visibility, then release lock
1669 : */
1670 355212 : valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1671 :
1672 355212 : if (valid)
1673 355090 : PredicateLockTID(relation, &(tuple->t_self), snapshot,
1674 355090 : HeapTupleHeaderGetXmin(tuple->t_data));
1675 :
1676 355212 : HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1677 :
1678 355212 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1679 :
1680 355212 : if (valid)
1681 : {
1682 : /*
1683 : * All checks passed, so return the tuple as valid. Caller is now
1684 : * responsible for releasing the buffer.
1685 : */
1686 355090 : *userbuf = buffer;
1687 :
1688 355090 : return true;
1689 : }
1690 :
1691 : /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1692 122 : if (keep_buf)
1693 68 : *userbuf = buffer;
1694 : else
1695 : {
1696 54 : ReleaseBuffer(buffer);
1697 54 : *userbuf = InvalidBuffer;
1698 54 : tuple->t_data = NULL;
1699 : }
1700 :
1701 122 : return false;
1702 : }
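
/*
 * A small sketch of the heap_fetch() contract described above: the caller
 * fills tuple->t_self with the TID to fetch and, on success, must release
 * the returned buffer pin itself.  The helper name and block/offset
 * parameters are illustrative; an open relation and an active snapshot are
 * assumed, and with keep_buf = false there is nothing to release on failure.
 */
static bool
fetch_one_tuple_example(Relation rel, BlockNumber blkno, OffsetNumber offnum)
{
	HeapTupleData tuple;
	Buffer		buf;
	bool		found;

	ItemPointerSet(&tuple.t_self, blkno, offnum);

	found = heap_fetch(rel, GetActiveSnapshot(), &tuple, &buf, false);
	if (found)
	{
		/* tuple.t_data stays valid only while we hold the buffer pin */
		ReleaseBuffer(buf);
	}

	return found;
}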
1703 :
1704 : /*
1705 : * heap_hot_search_buffer - search HOT chain for tuple satisfying snapshot
1706 : *
1707 : * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
1708 : * of a HOT chain), and buffer is the buffer holding this tuple. We search
1709 : * for the first chain member satisfying the given snapshot. If one is
1710 : * found, we update *tid to reference that tuple's offset number, and
1711 : * return true. If no match, return false without modifying *tid.
1712 : *
1713 : * heapTuple is a caller-supplied buffer. When a match is found, we return
1714 : * the tuple here, in addition to updating *tid. If no match is found, the
1715 : * contents of this buffer on return are undefined.
1716 : *
1717 : * If all_dead is not NULL, we check non-visible tuples to see if they are
1718 : * globally dead; *all_dead is set true if all members of the HOT chain
1719 : * are vacuumable, false if not.
1720 : *
1721 : * Unlike heap_fetch, the caller must already have pin and (at least) share
1722 : * lock on the buffer; it is still pinned/locked at exit.
1723 : */
1724 : bool
1725 45017828 : heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
1726 : Snapshot snapshot, HeapTuple heapTuple,
1727 : bool *all_dead, bool first_call)
1728 : {
1729 45017828 : Page page = BufferGetPage(buffer);
1730 45017828 : TransactionId prev_xmax = InvalidTransactionId;
1731 : BlockNumber blkno;
1732 : OffsetNumber offnum;
1733 : bool at_chain_start;
1734 : bool valid;
1735 : bool skip;
1736 45017828 : GlobalVisState *vistest = NULL;
1737 :
1738 : /* If this is not the first call, previous call returned a (live!) tuple */
1739 45017828 : if (all_dead)
1740 38568040 : *all_dead = first_call;
1741 :
1742 45017828 : blkno = ItemPointerGetBlockNumber(tid);
1743 45017828 : offnum = ItemPointerGetOffsetNumber(tid);
1744 45017828 : at_chain_start = first_call;
1745 45017828 : skip = !first_call;
1746 :
1747 : /* XXX: we should assert that a snapshot is pushed or registered */
1748 : Assert(TransactionIdIsValid(RecentXmin));
1749 : Assert(BufferGetBlockNumber(buffer) == blkno);
1750 :
1751 : /* Scan through possible multiple members of HOT-chain */
1752 : for (;;)
1753 2878496 : {
1754 : ItemId lp;
1755 :
1756 : /* check for bogus TID */
1757 47896324 : if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1758 : break;
1759 :
1760 47896324 : lp = PageGetItemId(page, offnum);
1761 :
1762 : /* check for unused, dead, or redirected items */
1763 47896324 : if (!ItemIdIsNormal(lp))
1764 : {
1765 : /* We should only see a redirect at start of chain */
1766 1695890 : if (ItemIdIsRedirected(lp) && at_chain_start)
1767 : {
1768 : /* Follow the redirect */
1769 1009554 : offnum = ItemIdGetRedirect(lp);
1770 1009554 : at_chain_start = false;
1771 1009554 : continue;
1772 : }
1773 : /* else must be end of chain */
1774 686336 : break;
1775 : }
1776 :
1777 : /*
1778 : * Update heapTuple to point to the element of the HOT chain we're
1779 : * currently investigating. Having t_self set correctly is important
1780 : * because the SSI checks and the *Satisfies routine for historical
1781 : * MVCC snapshots need the correct tid to decide about the visibility.
1782 : */
1783 46200434 : heapTuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1784 46200434 : heapTuple->t_len = ItemIdGetLength(lp);
1785 46200434 : heapTuple->t_tableOid = RelationGetRelid(relation);
1786 46200434 : ItemPointerSet(&heapTuple->t_self, blkno, offnum);
1787 :
1788 : /*
1789 : * Shouldn't see a HEAP_ONLY tuple at chain start.
1790 : */
1791 46200434 : if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
1792 0 : break;
1793 :
1794 : /*
1795 : * The xmin should match the previous xmax value, else chain is
1796 : * broken.
1797 : */
1798 48069376 : if (TransactionIdIsValid(prev_xmax) &&
1799 1868942 : !TransactionIdEquals(prev_xmax,
1800 : HeapTupleHeaderGetXmin(heapTuple->t_data)))
1801 0 : break;
1802 :
1803 : /*
1804 : * When first_call is true (and thus, skip is initially false) we'll
1805 : * return the first tuple we find. But on later passes, heapTuple
1806 : * will initially be pointing to the tuple we returned last time.
1807 : * Returning it again would be incorrect (and would loop forever), so
1808 : * we skip it and return the next match we find.
1809 : */
1810 46200434 : if (!skip)
1811 : {
1812 : /* If it's visible per the snapshot, we must return it */
1813 46029562 : valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
1814 46029562 : HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
1815 : buffer, snapshot);
1816 :
1817 46029552 : if (valid)
1818 : {
1819 31896804 : ItemPointerSetOffsetNumber(tid, offnum);
1820 31896804 : PredicateLockTID(relation, &heapTuple->t_self, snapshot,
1821 31896804 : HeapTupleHeaderGetXmin(heapTuple->t_data));
1822 31896804 : if (all_dead)
1823 25988520 : *all_dead = false;
1824 31896804 : return true;
1825 : }
1826 : }
1827 14303620 : skip = false;
1828 :
1829 : /*
1830 : * If we can't see it, maybe no one else can either. At caller
1831 : * request, check whether all chain members are dead to all
1832 : * transactions.
1833 : *
1834 : * Note: if you change the criterion here for what is "dead", fix the
1835 : * planner's get_actual_variable_range() function to match.
1836 : */
1837 14303620 : if (all_dead && *all_dead)
1838 : {
1839 12878384 : if (!vistest)
1840 12642938 : vistest = GlobalVisTestFor(relation);
1841 :
1842 12878384 : if (!HeapTupleIsSurelyDead(heapTuple, vistest))
1843 12193916 : *all_dead = false;
1844 : }
1845 :
1846 : /*
1847 : * Check to see if HOT chain continues past this tuple; if so fetch
1848 : * the next offnum and loop around.
1849 : */
1850 14303620 : if (HeapTupleIsHotUpdated(heapTuple))
1851 : {
1852 : Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
1853 : blkno);
1854 1868942 : offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
1855 1868942 : at_chain_start = false;
1856 1868942 : prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1857 : }
1858 : else
1859 12434678 : break; /* end of chain */
1860 : }
1861 :
1862 13121014 : return false;
1863 : }
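
/*
 * A sketch of calling heap_hot_search_buffer() per the contract above: the
 * caller must already hold a pin and at least a share lock on the buffer
 * containing the root TID, and both are still held on return.  The helper
 * name is illustrative; a registered or active snapshot is assumed.
 */
static bool
hot_chain_lookup_example(Relation rel, ItemPointerData tid, Snapshot snapshot)
{
	Buffer		buf;
	HeapTupleData heapTuple;
	bool		all_dead;
	bool		found;

	buf = ReadBuffer(rel, ItemPointerGetBlockNumber(&tid));
	LockBuffer(buf, BUFFER_LOCK_SHARE);

	/* on success, tid is updated to the visible chain member's offset */
	found = heap_hot_search_buffer(&tid, rel, buf, snapshot, &heapTuple,
								   &all_dead, true);

	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
	ReleaseBuffer(buf);

	return found;
}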
1864 :
1865 : /*
1866 : * heap_get_latest_tid - get the latest tid of a specified tuple
1867 : *
1868 : * Actually, this gets the latest version that is visible according to the
1869 : * scan's snapshot. Create a scan using SnapshotDirty to get the very latest,
1870 : * possibly uncommitted version.
1871 : *
1872 : * *tid is both an input and an output parameter: it is updated to
1873 : * show the latest version of the row. Note that it will not be changed
1874 : * if no version of the row passes the snapshot test.
1875 : */
1876 : void
1877 300 : heap_get_latest_tid(TableScanDesc sscan,
1878 : ItemPointer tid)
1879 : {
1880 300 : Relation relation = sscan->rs_rd;
1881 300 : Snapshot snapshot = sscan->rs_snapshot;
1882 : ItemPointerData ctid;
1883 : TransactionId priorXmax;
1884 :
1885 : /*
1886 : * table_tuple_get_latest_tid() verified that the passed in tid is valid.
1887 : * Assume that t_ctid links are valid however - there shouldn't be invalid
1888 : * ones in the table.
1889 : */
1890 : Assert(ItemPointerIsValid(tid));
1891 :
1892 : /*
1893 : * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1894 : * need to examine, and *tid is the TID we will return if ctid turns out
1895 : * to be bogus.
1896 : *
1897 : * Note that we will loop until we reach the end of the t_ctid chain.
1898 : * Depending on the snapshot passed, there might be at most one visible
1899 : * version of the row, but we don't try to optimize for that.
1900 : */
1901 300 : ctid = *tid;
1902 300 : priorXmax = InvalidTransactionId; /* cannot check first XMIN */
1903 : for (;;)
1904 90 : {
1905 : Buffer buffer;
1906 : Page page;
1907 : OffsetNumber offnum;
1908 : ItemId lp;
1909 : HeapTupleData tp;
1910 : bool valid;
1911 :
1912 : /*
1913 : * Read, pin, and lock the page.
1914 : */
1915 390 : buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
1916 390 : LockBuffer(buffer, BUFFER_LOCK_SHARE);
1917 390 : page = BufferGetPage(buffer);
1918 :
1919 : /*
1920 : * Check for bogus item number. This is not treated as an error
1921 : * condition because it can happen while following a t_ctid link. We
1922 : * just assume that the prior tid is OK and return it unchanged.
1923 : */
1924 390 : offnum = ItemPointerGetOffsetNumber(&ctid);
1925 390 : if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1926 : {
1927 0 : UnlockReleaseBuffer(buffer);
1928 0 : break;
1929 : }
1930 390 : lp = PageGetItemId(page, offnum);
1931 390 : if (!ItemIdIsNormal(lp))
1932 : {
1933 0 : UnlockReleaseBuffer(buffer);
1934 0 : break;
1935 : }
1936 :
1937 : /* OK to access the tuple */
1938 390 : tp.t_self = ctid;
1939 390 : tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
1940 390 : tp.t_len = ItemIdGetLength(lp);
1941 390 : tp.t_tableOid = RelationGetRelid(relation);
1942 :
1943 : /*
1944 : * After following a t_ctid link, we might arrive at an unrelated
1945 : * tuple. Check for XMIN match.
1946 : */
1947 480 : if (TransactionIdIsValid(priorXmax) &&
1948 90 : !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
1949 : {
1950 0 : UnlockReleaseBuffer(buffer);
1951 0 : break;
1952 : }
1953 :
1954 : /*
1955 : * Check tuple visibility; if visible, set it as the new result
1956 : * candidate.
1957 : */
1958 390 : valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
1959 390 : HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
1960 390 : if (valid)
1961 276 : *tid = ctid;
1962 :
1963 : /*
1964 : * If there's a valid t_ctid link, follow it, else we're done.
1965 : */
1966 552 : if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
1967 276 : HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
1968 228 : HeapTupleHeaderIndicatesMovedPartitions(tp.t_data) ||
1969 114 : ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
1970 : {
1971 300 : UnlockReleaseBuffer(buffer);
1972 300 : break;
1973 : }
1974 :
1975 90 : ctid = tp.t_data->t_ctid;
1976 90 : priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
1977 90 : UnlockReleaseBuffer(buffer);
1978 : } /* end of loop */
1979 300 : }
1980 :
1981 :
1982 : /*
1983 : * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
1984 : *
1985 : * This is called after we have waited for the XMAX transaction to terminate.
1986 : * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
1987 : * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
1988 : * hint bit if possible --- but beware that that may not yet be possible,
1989 : * if the transaction committed asynchronously.
1990 : *
1991 : * Note that if the transaction was a locker only, we set HEAP_XMAX_INVALID
1992 : * even if it commits.
1993 : *
1994 : * Hence callers should look only at XMAX_INVALID.
1995 : *
1996 : * Note this is not allowed for tuples whose xmax is a multixact.
1997 : */
1998 : static void
1999 428 : UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
2000 : {
2001 : Assert(TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple), xid));
2002 : Assert(!(tuple->t_infomask & HEAP_XMAX_IS_MULTI));
2003 :
2004 428 : if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
2005 : {
2006 766 : if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
2007 338 : TransactionIdDidCommit(xid))
2008 284 : HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
2009 : xid);
2010 : else
2011 144 : HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
2012 : InvalidTransactionId);
2013 : }
2014 428 : }
2015 :
2016 :
2017 : /*
2018 : * GetBulkInsertState - prepare status object for a bulk insert
2019 : */
2020 : BulkInsertState
2021 4662 : GetBulkInsertState(void)
2022 : {
2023 : BulkInsertState bistate;
2024 :
2025 4662 : bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
2026 4662 : bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
2027 4662 : bistate->current_buf = InvalidBuffer;
2028 4662 : bistate->next_free = InvalidBlockNumber;
2029 4662 : bistate->last_free = InvalidBlockNumber;
2030 4662 : bistate->already_extended_by = 0;
2031 4662 : return bistate;
2032 : }
2033 :
2034 : /*
2035 : * FreeBulkInsertState - clean up after finishing a bulk insert
2036 : */
2037 : void
2038 4374 : FreeBulkInsertState(BulkInsertState bistate)
2039 : {
2040 4374 : if (bistate->current_buf != InvalidBuffer)
2041 3510 : ReleaseBuffer(bistate->current_buf);
2042 4374 : FreeAccessStrategy(bistate->strategy);
2043 4374 : pfree(bistate);
2044 4374 : }
2045 :
2046 : /*
2047 : * ReleaseBulkInsertStatePin - release a buffer currently held in bistate
2048 : */
2049 : void
2050 161516 : ReleaseBulkInsertStatePin(BulkInsertState bistate)
2051 : {
2052 161516 : if (bistate->current_buf != InvalidBuffer)
2053 60042 : ReleaseBuffer(bistate->current_buf);
2054 161516 : bistate->current_buf = InvalidBuffer;
2055 :
2056 : /*
2057 : * Despite the name, we also reset bulk relation extension state.
2058 : * Otherwise we can end up erroring out due to looking for free space in
2059 : * ->next_free of one partition, even though ->next_free was set when
2060 : * extending another partition. It could obviously also be bad for
2061 : * efficiency to look at existing blocks at offsets from another
2062 : * partition, even if we don't error out.
2063 : */
2064 161516 : bistate->next_free = InvalidBlockNumber;
2065 161516 : bistate->last_free = InvalidBlockNumber;
2066 161516 : }
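
/*
 * A sketch of the bulk-insert state lifecycle implemented by the routines
 * above: a caller that inserts many tuples (COPY, for instance) allocates
 * one BulkInsertState up front, passes it to every heap_insert() call so
 * the BAS_BULKWRITE strategy and current buffer are reused, and frees it
 * when done.  The helper name and tuple array are illustrative; an open
 * transaction is assumed and tuple construction is elided.
 */
static void
bulk_insert_example(Relation rel, HeapTuple *tuples, int ntuples)
{
	BulkInsertState bistate = GetBulkInsertState();
	CommandId	cid = GetCurrentCommandId(true);

	for (int i = 0; i < ntuples; i++)
		heap_insert(rel, tuples[i], cid, 0, bistate);

	FreeBulkInsertState(bistate);
}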
2067 :
2068 :
2069 : /*
2070 : * heap_insert - insert tuple into a heap
2071 : *
2072 : * The new tuple is stamped with current transaction ID and the specified
2073 : * command ID.
2074 : *
2075 : * See table_tuple_insert for comments about most of the input flags, except
2076 : * that this routine directly takes a tuple rather than a slot.
2077 : *
2078 : * There's corresponding HEAP_INSERT_ options to all the TABLE_INSERT_
2079 : * options, and there additionally is HEAP_INSERT_SPECULATIVE which is used to
2080 : * implement table_tuple_insert_speculative().
2081 : *
2082 : * On return the header fields of *tup are updated to match the stored tuple;
2083 : * in particular tup->t_self receives the actual TID where the tuple was
2084 : * stored. But note that any toasting of fields within the tuple data is NOT
2085 : * reflected into *tup.
2086 : */
2087 : void
2088 16637584 : heap_insert(Relation relation, HeapTuple tup, CommandId cid,
2089 : int options, BulkInsertState bistate)
2090 : {
2091 16637584 : TransactionId xid = GetCurrentTransactionId();
2092 : HeapTuple heaptup;
2093 : Buffer buffer;
2094 16637568 : Buffer vmbuffer = InvalidBuffer;
2095 16637568 : bool all_visible_cleared = false;
2096 :
2097 : /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
2098 : Assert(HeapTupleHeaderGetNatts(tup->t_data) <=
2099 : RelationGetNumberOfAttributes(relation));
2100 :
2101 16637568 : AssertHasSnapshotForToast(relation);
2102 :
2103 : /*
2104 : * Fill in tuple header fields and toast the tuple if necessary.
2105 : *
2106 : * Note: below this point, heaptup is the data we actually intend to store
2107 : * into the relation; tup is the caller's original untoasted data.
2108 : */
2109 16637568 : heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2110 :
2111 : /*
2112 : * Find buffer to insert this tuple into. If the page is all visible,
2113 : * this will also pin the requisite visibility map page.
2114 : */
2115 16637568 : buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2116 : InvalidBuffer, options, bistate,
2117 : &vmbuffer, NULL,
2118 : 0);
2119 :
2120 : /*
2121 : * We're about to do the actual insert -- but check for conflict first, to
2122 : * avoid possibly having to roll back work we've just done.
2123 : *
2124 : * This is safe without a recheck as long as there is no possibility of
2125 : * another process scanning the page between this check and the insert
2126 : * being visible to the scan (i.e., an exclusive buffer content lock is
2127 : * continuously held from this point until the tuple insert is visible).
2128 : *
2129 : * For a heap insert, we only need to check for table-level SSI locks. Our
2130 : * new tuple can't possibly conflict with existing tuple locks, and heap
2131 : * page locks are only consolidated versions of tuple locks; they do not
2132 : * lock "gaps" as index page locks do. So we don't need to specify a
2133 : * buffer when making the call, which makes for a faster check.
2134 : */
2135 16637568 : CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
2136 :
2137 : /* NO EREPORT(ERROR) from here till changes are logged */
2138 16637544 : START_CRIT_SECTION();
2139 :
2140 16637544 : RelationPutHeapTuple(relation, buffer, heaptup,
2141 16637544 : (options & HEAP_INSERT_SPECULATIVE) != 0);
2142 :
2143 16637544 : if (PageIsAllVisible(BufferGetPage(buffer)))
2144 : {
2145 14840 : all_visible_cleared = true;
2146 14840 : PageClearAllVisible(BufferGetPage(buffer));
2147 14840 : visibilitymap_clear(relation,
2148 14840 : ItemPointerGetBlockNumber(&(heaptup->t_self)),
2149 : vmbuffer, VISIBILITYMAP_VALID_BITS);
2150 : }
2151 :
2152 : /*
2153 : * XXX Should we set PageSetPrunable on this page ?
2154 : *
2155 : * The inserting transaction may eventually abort thus making this tuple
2156 : * DEAD and hence available for pruning. Though we don't want to optimize
2157 : * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
2158 : * aborted tuple will never be pruned until next vacuum is triggered.
2159 : *
2160 : * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2161 : */
2162 :
2163 16637544 : MarkBufferDirty(buffer);
2164 :
2165 : /* XLOG stuff */
2166 16637544 : if (RelationNeedsWAL(relation))
2167 : {
2168 : xl_heap_insert xlrec;
2169 : xl_heap_header xlhdr;
2170 : XLogRecPtr recptr;
2171 14135590 : Page page = BufferGetPage(buffer);
2172 14135590 : uint8 info = XLOG_HEAP_INSERT;
2173 14135590 : int bufflags = 0;
2174 :
2175 : /*
2176 : * If this is a catalog, we need to transmit combo CIDs to properly
2177 : * decode, so log that as well.
2178 : */
2179 14135590 : if (RelationIsAccessibleInLogicalDecoding(relation))
2180 6788 : log_heap_new_cid(relation, heaptup);
2181 :
2182 : /*
2183 : * If this is the single and first tuple on page, we can reinit the
2184 : * page instead of restoring the whole thing. Set flag, and hide
2185 : * buffer references from XLogInsert.
2186 : */
2187 14317782 : if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
2188 182192 : PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
2189 : {
2190 180476 : info |= XLOG_HEAP_INIT_PAGE;
2191 180476 : bufflags |= REGBUF_WILL_INIT;
2192 : }
2193 :
2194 14135590 : xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
2195 14135590 : xlrec.flags = 0;
2196 14135590 : if (all_visible_cleared)
2197 14834 : xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
2198 14135590 : if (options & HEAP_INSERT_SPECULATIVE)
2199 4122 : xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
2200 : Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
2201 :
2202 : /*
2203 : * For logical decoding, we need the tuple even if we're doing a full
2204 : * page write, so make sure it's included even if we take a full-page
2205 : * image. (XXX We could alternatively store a pointer into the FPW).
2206 : */
2207 14135590 : if (RelationIsLogicallyLogged(relation) &&
2208 499828 : !(options & HEAP_INSERT_NO_LOGICAL))
2209 : {
2210 499774 : xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
2211 499774 : bufflags |= REGBUF_KEEP_DATA;
2212 :
2213 499774 : if (IsToastRelation(relation))
2214 3572 : xlrec.flags |= XLH_INSERT_ON_TOAST_RELATION;
2215 : }
2216 :
2217 14135590 : XLogBeginInsert();
2218 14135590 : XLogRegisterData(&xlrec, SizeOfHeapInsert);
2219 :
2220 14135590 : xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2221 14135590 : xlhdr.t_infomask = heaptup->t_data->t_infomask;
2222 14135590 : xlhdr.t_hoff = heaptup->t_data->t_hoff;
2223 :
2224 : /*
2225 : * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2226 : * write the whole page to the xlog, we don't need to store
2227 : * xl_heap_header in the xlog.
2228 : */
2229 14135590 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2230 14135590 : XLogRegisterBufData(0, &xlhdr, SizeOfHeapHeader);
2231 : /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2232 14135590 : XLogRegisterBufData(0,
2233 14135590 : (char *) heaptup->t_data + SizeofHeapTupleHeader,
2234 14135590 : heaptup->t_len - SizeofHeapTupleHeader);
2235 :
2236 : /* filtering by origin on a row level is much more efficient */
2237 14135590 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2238 :
2239 14135590 : recptr = XLogInsert(RM_HEAP_ID, info);
2240 :
2241 14135590 : PageSetLSN(page, recptr);
2242 : }
2243 :
2244 16637544 : END_CRIT_SECTION();
2245 :
2246 16637544 : UnlockReleaseBuffer(buffer);
2247 16637544 : if (vmbuffer != InvalidBuffer)
2248 15398 : ReleaseBuffer(vmbuffer);
2249 :
2250 : /*
2251 : * If tuple is cachable, mark it for invalidation from the caches in case
2252 : * we abort. Note it is OK to do this after releasing the buffer, because
2253 : * the heaptup data structure is all in local memory, not in the shared
2254 : * buffer.
2255 : */
2256 16637544 : CacheInvalidateHeapTuple(relation, heaptup, NULL);
2257 :
2258 : /* Note: speculative insertions are counted too, even if aborted later */
2259 16637544 : pgstat_count_heap_insert(relation, 1);
2260 :
2261 : /*
2262 : * If heaptup is a private copy, release it. Don't forget to copy t_self
2263 : * back to the caller's image, too.
2264 : */
2265 16637544 : if (heaptup != tup)
2266 : {
2267 36306 : tup->t_self = heaptup->t_self;
2268 36306 : heap_freetuple(heaptup);
2269 : }
2270 16637544 : }
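
/*
 * A sketch of a single-row heap_insert() call, showing that the caller-
 * visible result is the TID assigned in tup->t_self.  The helper name and
 * the one-column (int4) relation are illustrative assumptions; real callers
 * normally go through the table AM (table_tuple_insert) or, for catalogs,
 * simple_heap_insert() below, rather than calling this directly.
 */
static ItemPointerData
insert_one_row_example(Relation rel, int32 value)
{
	Datum		values[1] = {Int32GetDatum(value)};
	bool		isnull[1] = {false};
	HeapTuple	tup = heap_form_tuple(RelationGetDescr(rel), values, isnull);
	ItemPointerData tid;

	heap_insert(rel, tup, GetCurrentCommandId(true), 0, NULL);

	tid = tup->t_self;			/* filled in by heap_insert() */
	heap_freetuple(tup);
	return tid;
}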
2271 :
2272 : /*
2273 : * Subroutine for heap_insert(). Prepares a tuple for insertion. This sets the
2274 : * tuple header fields and toasts the tuple if necessary. Returns a toasted
2275 : * version of the tuple if it was toasted, or the original tuple if not. Note
2276 : * that in any case, the header fields are also set in the original tuple.
2277 : */
2278 : static HeapTuple
2279 19578732 : heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
2280 : CommandId cid, int options)
2281 : {
2282 : /*
2283 : * To allow parallel inserts, we need to ensure that they are safe to be
2284 : * performed in workers. We have the infrastructure to allow parallel
2285 : * inserts in general except for the cases where inserts generate a new
2286 : * CommandId (eg. inserts into a table having a foreign key column).
2287 : */
2288 19578732 : if (IsParallelWorker())
2289 0 : ereport(ERROR,
2290 : (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2291 : errmsg("cannot insert tuples in a parallel worker")));
2292 :
2293 19578732 : tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
2294 19578732 : tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
2295 19578732 : tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
2296 19578732 : HeapTupleHeaderSetXmin(tup->t_data, xid);
2297 19578732 : if (options & HEAP_INSERT_FROZEN)
2298 204192 : HeapTupleHeaderSetXminFrozen(tup->t_data);
2299 :
2300 19578732 : HeapTupleHeaderSetCmin(tup->t_data, cid);
2301 19578732 : HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
2302 19578732 : tup->t_tableOid = RelationGetRelid(relation);
2303 :
2304 : /*
2305 : * If the new tuple is too big for storage or contains already toasted
2306 : * out-of-line attributes from some other relation, invoke the toaster.
2307 : */
2308 19578732 : if (relation->rd_rel->relkind != RELKIND_RELATION &&
2309 61630 : relation->rd_rel->relkind != RELKIND_MATVIEW)
2310 : {
2311 : /* toast table entries should never be recursively toasted */
2312 : Assert(!HeapTupleHasExternal(tup));
2313 61534 : return tup;
2314 : }
2315 19517198 : else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
2316 36394 : return heap_toast_insert_or_update(relation, tup, NULL, options);
2317 : else
2318 19480804 : return tup;
2319 : }
2320 :
2321 : /*
2322 : * Helper for heap_multi_insert() that computes the number of entire pages
2323 : * that inserting the remaining heaptuples requires. Used to determine how
2324 : * much the relation needs to be extended by.
2325 : */
2326 : static int
2327 727996 : heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace)
2328 : {
2329 727996 : size_t page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
2330 727996 : int npages = 1;
2331 :
2332 4849588 : for (int i = done; i < ntuples; i++)
2333 : {
2334 4121592 : size_t tup_sz = sizeof(ItemIdData) + MAXALIGN(heaptuples[i]->t_len);
2335 :
2336 4121592 : if (page_avail < tup_sz)
2337 : {
2338 31110 : npages++;
2339 31110 : page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
2340 : }
2341 4121592 : page_avail -= tup_sz;
2342 : }
2343 :
2344 727996 : return npages;
2345 : }
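
/*
 * Worked example of the computation above, assuming the default 8192-byte
 * BLCKSZ, a 24-byte page header, saveFreeSpace = 0, and 100 remaining tuples
 * of 800 MAXALIGN'd bytes each (plus a 4-byte ItemIdData): 8168 usable bytes
 * fit ten 804-byte entries per page, so the function returns npages = 10 and
 * RelationGetBufferForTuple() knows it may need to extend the relation by up
 * to that many blocks.
 */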
2346 :
2347 : /*
2348 : * heap_multi_insert - insert multiple tuples into a heap
2349 : *
2350 : * This is like heap_insert(), but inserts multiple tuples in one operation.
2351 : * That's faster than calling heap_insert() in a loop, because when multiple
2352 : * tuples can be inserted on a single page, we can write just a single WAL
2353 : * record covering all of them, and only need to lock/unlock the page once.
2354 : *
2355 : * Note: this leaks memory into the current memory context. You can create a
2356 : * temporary context before calling this, if that's a problem.
2357 : */
2358 : void
2359 714702 : heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
2360 : CommandId cid, int options, BulkInsertState bistate)
2361 : {
2362 714702 : TransactionId xid = GetCurrentTransactionId();
2363 : HeapTuple *heaptuples;
2364 : int i;
2365 : int ndone;
2366 : PGAlignedBlock scratch;
2367 : Page page;
2368 714702 : Buffer vmbuffer = InvalidBuffer;
2369 : bool needwal;
2370 : Size saveFreeSpace;
2371 714702 : bool need_tuple_data = RelationIsLogicallyLogged(relation);
2372 714702 : bool need_cids = RelationIsAccessibleInLogicalDecoding(relation);
2373 714702 : bool starting_with_empty_page = false;
2374 714702 : int npages = 0;
2375 714702 : int npages_used = 0;
2376 :
2377 : /* currently not needed (thus unsupported) for heap_multi_insert() */
2378 : Assert(!(options & HEAP_INSERT_NO_LOGICAL));
2379 :
2380 714702 : AssertHasSnapshotForToast(relation);
2381 :
2382 714702 : needwal = RelationNeedsWAL(relation);
2383 714702 : saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
2384 : HEAP_DEFAULT_FILLFACTOR);
2385 :
2386 : /* Toast and set header data in all the slots */
2387 714702 : heaptuples = palloc(ntuples * sizeof(HeapTuple));
2388 3655866 : for (i = 0; i < ntuples; i++)
2389 : {
2390 : HeapTuple tuple;
2391 :
2392 2941164 : tuple = ExecFetchSlotHeapTuple(slots[i], true, NULL);
2393 2941164 : slots[i]->tts_tableOid = RelationGetRelid(relation);
2394 2941164 : tuple->t_tableOid = slots[i]->tts_tableOid;
2395 2941164 : heaptuples[i] = heap_prepare_insert(relation, tuple, xid, cid,
2396 : options);
2397 : }
2398 :
2399 : /*
2400 : * We're about to do the actual inserts -- but check for conflict first,
2401 : * to minimize the possibility of having to roll back work we've just
2402 : * done.
2403 : *
2404 : * A check here does not definitively prevent a serialization anomaly;
2405 : * that check MUST be done at least past the point of acquiring an
2406 : * exclusive buffer content lock on every buffer that will be affected,
2407 : * and MAY be done after all inserts are reflected in the buffers and
2408 : * those locks are released; otherwise there is a race condition. Since
2409 : * multiple buffers can be locked and unlocked in the loop below, and it
2410 : * would not be feasible to identify and lock all of those buffers before
2411 : * the loop, we must do a final check at the end.
2412 : *
2413 : * The check here could be omitted with no loss of correctness; it is
2414 : * present strictly as an optimization.
2415 : *
2416 : * For heap inserts, we only need to check for table-level SSI locks. Our
2417 : * new tuples can't possibly conflict with existing tuple locks, and heap
2418 : * page locks are only consolidated versions of tuple locks; they do not
2419 : * lock "gaps" as index page locks do. So we don't need to specify a
2420 : * buffer when making the call, which makes for a faster check.
2421 : */
2422 714702 : CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
2423 :
2424 714702 : ndone = 0;
2425 1458994 : while (ndone < ntuples)
2426 : {
2427 : Buffer buffer;
2428 744292 : bool all_visible_cleared = false;
2429 744292 : bool all_frozen_set = false;
2430 : int nthispage;
2431 :
2432 744292 : CHECK_FOR_INTERRUPTS();
2433 :
2434 : /*
2435 : * Compute number of pages needed to fit the to-be-inserted tuples in
2436 : * the worst case. This will be used to determine how much to extend
2437 : * the relation by in RelationGetBufferForTuple(), if needed. If we
2438 : * filled a prior page from scratch, we can just update our last
2439 : * computation, but if we started with a partially filled page,
2440 : * recompute from scratch, the number of potentially required pages
2441 :          * recompute from scratch: the number of potentially required pages
2442 :          * can vary due to tuples needing to fit onto the page, page headers,
2443 :          * etc.
2444 744292 : if (ndone == 0 || !starting_with_empty_page)
2445 : {
2446 727996 : npages = heap_multi_insert_pages(heaptuples, ndone, ntuples,
2447 : saveFreeSpace);
2448 727996 : npages_used = 0;
2449 : }
2450 : else
2451 16296 : npages_used++;
2452 :
2453 : /*
2454 : * Find buffer where at least the next tuple will fit. If the page is
2455 : * all-visible, this will also pin the requisite visibility map page.
2456 : *
2457 : * Also pin visibility map page if COPY FREEZE inserts tuples into an
2458 : * empty page. See all_frozen_set below.
2459 : */
2460 744292 : buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
2461 : InvalidBuffer, options, bistate,
2462 : &vmbuffer, NULL,
2463 : npages - npages_used);
2464 744292 : page = BufferGetPage(buffer);
2465 :
2466 744292 : starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0;
2467 :
2468 744292 : if (starting_with_empty_page && (options & HEAP_INSERT_FROZEN))
2469 3322 : all_frozen_set = true;
2470 :
2471 : /* NO EREPORT(ERROR) from here till changes are logged */
2472 744292 : START_CRIT_SECTION();
2473 :
2474 : /*
2475 : * RelationGetBufferForTuple has ensured that the first tuple fits.
2476 : * Put that on the page, and then as many other tuples as fit.
2477 : */
2478 744292 : RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
2479 :
2480 : /*
2481 : * For logical decoding we need combo CIDs to properly decode the
2482 : * catalog.
2483 : */
2484 744292 : if (needwal && need_cids)
2485 9976 : log_heap_new_cid(relation, heaptuples[ndone]);
2486 :
2487 2941164 : for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
2488 : {
2489 2226462 : HeapTuple heaptup = heaptuples[ndone + nthispage];
2490 :
2491 2226462 : if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace)
2492 29590 : break;
2493 :
2494 2196872 : RelationPutHeapTuple(relation, buffer, heaptup, false);
2495 :
2496 : /*
2497 : * For logical decoding we need combo CIDs to properly decode the
2498 : * catalog.
2499 : */
2500 2196872 : if (needwal && need_cids)
2501 9366 : log_heap_new_cid(relation, heaptup);
2502 : }
2503 :
2504 : /*
2505 : * If the page is all visible, need to clear that, unless we're only
2506 : * going to add further frozen rows to it.
2507 : *
2508 : * If we're only adding already frozen rows to a previously empty
2509 : * page, mark it as all-visible.
2510 : */
2511 744292 : if (PageIsAllVisible(page) && !(options & HEAP_INSERT_FROZEN))
2512 : {
2513 6466 : all_visible_cleared = true;
2514 6466 : PageClearAllVisible(page);
2515 6466 : visibilitymap_clear(relation,
2516 : BufferGetBlockNumber(buffer),
2517 : vmbuffer, VISIBILITYMAP_VALID_BITS);
2518 : }
2519 737826 : else if (all_frozen_set)
2520 3322 : PageSetAllVisible(page);
2521 :
2522 : /*
2523 : * XXX Should we set PageSetPrunable on this page ? See heap_insert()
2524 : */
2525 :
2526 744292 : MarkBufferDirty(buffer);
2527 :
2528 : /* XLOG stuff */
2529 744292 : if (needwal)
2530 : {
2531 : XLogRecPtr recptr;
2532 : xl_heap_multi_insert *xlrec;
2533 736636 : uint8 info = XLOG_HEAP2_MULTI_INSERT;
2534 : char *tupledata;
2535 : int totaldatalen;
2536 736636 : char *scratchptr = scratch.data;
2537 : bool init;
2538 736636 : int bufflags = 0;
2539 :
2540 : /*
2541 : * If the page was previously empty, we can reinit the page
2542 : * instead of restoring the whole thing.
2543 : */
2544 736636 : init = starting_with_empty_page;
2545 :
2546 : /* allocate xl_heap_multi_insert struct from the scratch area */
2547 736636 : xlrec = (xl_heap_multi_insert *) scratchptr;
2548 736636 : scratchptr += SizeOfHeapMultiInsert;
2549 :
2550 : /*
2551 :                  * Allocate offsets array, unless we're reinitializing the page;
2552 :                  * in that case the tuples are stored in order starting at
2553 : * FirstOffsetNumber and we don't need to store the offsets
2554 : * explicitly.
2555 : */
2556 736636 : if (!init)
2557 710832 : scratchptr += nthispage * sizeof(OffsetNumber);
2558 :
2559 : /* the rest of the scratch space is used for tuple data */
2560 736636 : tupledata = scratchptr;
2561 :
2562 : /* check that the mutually exclusive flags are not both set */
2563 : Assert(!(all_visible_cleared && all_frozen_set));
2564 :
2565 736636 : xlrec->flags = 0;
2566 736636 : if (all_visible_cleared)
2567 6466 : xlrec->flags = XLH_INSERT_ALL_VISIBLE_CLEARED;
2568 736636 : if (all_frozen_set)
2569 26 : xlrec->flags = XLH_INSERT_ALL_FROZEN_SET;
2570 :
2571 736636 : xlrec->ntuples = nthispage;
2572 :
2573 : /*
2574 : * Write out an xl_multi_insert_tuple and the tuple data itself
2575 : * for each tuple.
2576 : */
2577 3266970 : for (i = 0; i < nthispage; i++)
2578 : {
2579 2530334 : HeapTuple heaptup = heaptuples[ndone + i];
2580 : xl_multi_insert_tuple *tuphdr;
2581 : int datalen;
2582 :
2583 2530334 : if (!init)
2584 1491234 : xlrec->offsets[i] = ItemPointerGetOffsetNumber(&heaptup->t_self);
2585 : /* xl_multi_insert_tuple needs two-byte alignment. */
2586 2530334 : tuphdr = (xl_multi_insert_tuple *) SHORTALIGN(scratchptr);
2587 2530334 : scratchptr = ((char *) tuphdr) + SizeOfMultiInsertTuple;
2588 :
2589 2530334 : tuphdr->t_infomask2 = heaptup->t_data->t_infomask2;
2590 2530334 : tuphdr->t_infomask = heaptup->t_data->t_infomask;
2591 2530334 : tuphdr->t_hoff = heaptup->t_data->t_hoff;
2592 :
2593 : /* write bitmap [+ padding] [+ oid] + data */
2594 2530334 : datalen = heaptup->t_len - SizeofHeapTupleHeader;
2595 2530334 : memcpy(scratchptr,
2596 2530334 : (char *) heaptup->t_data + SizeofHeapTupleHeader,
2597 : datalen);
2598 2530334 : tuphdr->datalen = datalen;
2599 2530334 : scratchptr += datalen;
2600 : }
2601 736636 : totaldatalen = scratchptr - tupledata;
2602 : Assert((scratchptr - scratch.data) < BLCKSZ);
2603 :
2604 736636 : if (need_tuple_data)
2605 144 : xlrec->flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
2606 :
2607 : /*
2608 : * Signal that this is the last xl_heap_multi_insert record
2609 : * emitted by this call to heap_multi_insert(). Needed for logical
2610 : * decoding so it knows when to cleanup temporary data.
2611 : */
2612 736636 : if (ndone + nthispage == ntuples)
2613 713864 : xlrec->flags |= XLH_INSERT_LAST_IN_MULTI;
2614 :
2615 736636 : if (init)
2616 : {
2617 25804 : info |= XLOG_HEAP_INIT_PAGE;
2618 25804 : bufflags |= REGBUF_WILL_INIT;
2619 : }
2620 :
2621 : /*
2622 : * If we're doing logical decoding, include the new tuple data
2623 : * even if we take a full-page image of the page.
2624 : */
2625 736636 : if (need_tuple_data)
2626 144 : bufflags |= REGBUF_KEEP_DATA;
2627 :
2628 736636 : XLogBeginInsert();
2629 736636 : XLogRegisterData(xlrec, tupledata - scratch.data);
2630 736636 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2631 :
2632 736636 : XLogRegisterBufData(0, tupledata, totaldatalen);
2633 :
2634 : /* filtering by origin on a row level is much more efficient */
2635 736636 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2636 :
2637 736636 : recptr = XLogInsert(RM_HEAP2_ID, info);
2638 :
2639 736636 : PageSetLSN(page, recptr);
2640 : }
2641 :
2642 744292 : END_CRIT_SECTION();
2643 :
2644 : /*
2645 : * If we've frozen everything on the page, update the visibilitymap.
2646 : * We're already holding pin on the vmbuffer.
2647 : */
2648 744292 : if (all_frozen_set)
2649 : {
2650 : /*
2651 : * It's fine to use InvalidTransactionId here - this is only used
2652 : * when HEAP_INSERT_FROZEN is specified, which intentionally
2653 : * violates visibility rules.
2654 : */
2655 3322 : visibilitymap_set(relation, BufferGetBlockNumber(buffer), buffer,
2656 : InvalidXLogRecPtr, vmbuffer,
2657 : InvalidTransactionId,
2658 : VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
2659 : }
2660 :
2661 744292 : UnlockReleaseBuffer(buffer);
2662 744292 : ndone += nthispage;
2663 :
2664 : /*
2665 : * NB: Only release vmbuffer after inserting all tuples - it's fairly
2666 : * likely that we'll insert into subsequent heap pages that are likely
2667 : * to use the same vm page.
2668 : */
2669 : }
2670 :
2671 : /* We're done with inserting all tuples, so release the last vmbuffer. */
2672 714702 : if (vmbuffer != InvalidBuffer)
2673 6648 : ReleaseBuffer(vmbuffer);
2674 :
2675 : /*
2676 : * We're done with the actual inserts. Check for conflicts again, to
2677 : * ensure that all rw-conflicts in to these inserts are detected. Without
2678 : * this final check, a sequential scan of the heap may have locked the
2679 : * table after the "before" check, missing one opportunity to detect the
2680 : * conflict, and then scanned the table before the new tuples were there,
2681 : * missing the other chance to detect the conflict.
2682 : *
2683 : * For heap inserts, we only need to check for table-level SSI locks. Our
2684 : * new tuples can't possibly conflict with existing tuple locks, and heap
2685 : * page locks are only consolidated versions of tuple locks; they do not
2686 : * lock "gaps" as index page locks do. So we don't need to specify a
2687 : * buffer when making the call.
2688 : */
2689 714702 : CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
2690 :
2691 : /*
2692 : * If tuples are cachable, mark them for invalidation from the caches in
2693 : * case we abort. Note it is OK to do this after releasing the buffer,
2694 : * because the heaptuples data structure is all in local memory, not in
2695 : * the shared buffer.
2696 : */
2697 714702 : if (IsCatalogRelation(relation))
2698 : {
2699 2450288 : for (i = 0; i < ntuples; i++)
2700 1738012 : CacheInvalidateHeapTuple(relation, heaptuples[i], NULL);
2701 : }
2702 :
2703 : /* copy t_self fields back to the caller's slots */
2704 3655866 : for (i = 0; i < ntuples; i++)
2705 2941164 : slots[i]->tts_tid = heaptuples[i]->t_self;
2706 :
2707 714702 : pgstat_count_heap_insert(relation, ntuples);
2708 714702 : }
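
/*
 * A sketch of driving heap_multi_insert() the way COPY does: the caller
 * batches its tuples into TupleTableSlots and hands the whole array over in
 * one call, letting a single WAL record cover each filled page.  The helper
 * name is illustrative, executor/tuptable.h is assumed to be included, and
 * real callers also cap the batch by total byte size, not just tuple count.
 */
static void
multi_insert_example(Relation rel, HeapTuple *tuples, int ntuples)
{
	TupleTableSlot **slots = palloc(ntuples * sizeof(TupleTableSlot *));
	CommandId	cid = GetCurrentCommandId(true);

	for (int i = 0; i < ntuples; i++)
	{
		slots[i] = MakeSingleTupleTableSlot(RelationGetDescr(rel),
											&TTSOpsHeapTuple);
		ExecStoreHeapTuple(tuples[i], slots[i], false);
	}

	heap_multi_insert(rel, slots, ntuples, cid, 0, NULL);

	for (int i = 0; i < ntuples; i++)
		ExecDropSingleTupleTableSlot(slots[i]);
	pfree(slots);
}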
2709 :
2710 : /*
2711 : * simple_heap_insert - insert a tuple
2712 : *
2713 : * Currently, this routine differs from heap_insert only in supplying
2714 : * a default command ID and not allowing access to the speedup options.
2715 : *
2716 : * This should be used rather than using heap_insert directly in most places
2717 : * where we are modifying system catalogs.
2718 : */
2719 : void
2720 1797112 : simple_heap_insert(Relation relation, HeapTuple tup)
2721 : {
2722 1797112 : heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
2723 1797112 : }
2724 :
2725 : /*
2726 : * Given infomask/infomask2, compute the bits that must be saved in the
2727 : * "infobits" field of xl_heap_delete, xl_heap_update, xl_heap_lock,
2728 : * xl_heap_lock_updated WAL records.
2729 : *
2730 : * See fix_infomask_from_infobits.
2731 : */
2732 : static uint8
2733 3892544 : compute_infobits(uint16 infomask, uint16 infomask2)
2734 : {
2735 : return
2736 3892544 : ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2737 3892544 : ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2738 3892544 : ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2739 : /* note we ignore HEAP_XMAX_SHR_LOCK here */
2740 7785088 : ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2741 : ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2742 3892544 : XLHL_KEYS_UPDATED : 0);
2743 : }
2744 :
2745 : /*
2746 : * Given two versions of the same t_infomask for a tuple, compare them and
2747 : * return whether the relevant status for a tuple Xmax has changed. This is
2748 : * used after a buffer lock has been released and reacquired: we want to ensure
2749 : * that the tuple state continues to be the same it was when we previously
2750 : * examined it.
2751 : *
2752 : * Note the Xmax field itself must be compared separately.
2753 : */
2754 : static inline bool
2755 10738 : xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
2756 : {
2757 10738 : const uint16 interesting =
2758 : HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
2759 :
2760 10738 : if ((new_infomask & interesting) != (old_infomask & interesting))
2761 28 : return true;
2762 :
2763 10710 : return false;
2764 : }
2765 :
2766 : /*
2767 : * heap_delete - delete a tuple
2768 : *
2769 : * See table_tuple_delete() for an explanation of the parameters, except that
2770 : * this routine directly takes a tuple rather than a slot.
2771 : *
2772 : * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
2773 : * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
2774 : * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
2775 : * generated by another transaction).
2776 : */
2777 : TM_Result
2778 2985846 : heap_delete(Relation relation, ItemPointer tid,
2779 : CommandId cid, Snapshot crosscheck, bool wait,
2780 : TM_FailureData *tmfd, bool changingPart)
2781 : {
2782 : TM_Result result;
2783 2985846 : TransactionId xid = GetCurrentTransactionId();
2784 : ItemId lp;
2785 : HeapTupleData tp;
2786 : Page page;
2787 : BlockNumber block;
2788 : Buffer buffer;
2789 2985846 : Buffer vmbuffer = InvalidBuffer;
2790 : TransactionId new_xmax;
2791 : uint16 new_infomask,
2792 : new_infomask2;
2793 2985846 : bool have_tuple_lock = false;
2794 : bool iscombo;
2795 2985846 : bool all_visible_cleared = false;
2796 2985846 : HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
2797 2985846 : bool old_key_copied = false;
2798 :
2799 : Assert(ItemPointerIsValid(tid));
2800 :
2801 2985846 : AssertHasSnapshotForToast(relation);
2802 :
2803 : /*
2804 : * Forbid this during a parallel operation, lest it allocate a combo CID.
2805 : * Other workers might need that combo CID for visibility checks, and we
2806 : * have no provision for broadcasting it to them.
2807 : */
2808 2985846 : if (IsInParallelMode())
2809 0 : ereport(ERROR,
2810 : (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2811 : errmsg("cannot delete tuples during a parallel operation")));
2812 :
2813 2985846 : block = ItemPointerGetBlockNumber(tid);
2814 2985846 : buffer = ReadBuffer(relation, block);
2815 2985846 : page = BufferGetPage(buffer);
2816 :
2817 : /*
2818 : * Before locking the buffer, pin the visibility map page if it appears to
2819 : * be necessary. Since we haven't got the lock yet, someone else might be
2820 : * in the middle of changing this, so we'll need to recheck after we have
2821 : * the lock.
2822 : */
2823 2985846 : if (PageIsAllVisible(page))
2824 486 : visibilitymap_pin(relation, block, &vmbuffer);
2825 :
2826 2985846 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2827 :
2828 2985846 : lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2829 : Assert(ItemIdIsNormal(lp));
2830 :
2831 2985846 : tp.t_tableOid = RelationGetRelid(relation);
2832 2985846 : tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2833 2985846 : tp.t_len = ItemIdGetLength(lp);
2834 2985846 : tp.t_self = *tid;
2835 :
2836 2 : l1:
2837 :
2838 : /*
2839 : * If we didn't pin the visibility map page and the page has become all
2840 : * visible while we were busy locking the buffer, we'll have to unlock and
2841 : * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
2842 : * unfortunate, but hopefully shouldn't happen often.
2843 : */
2844 2985848 : if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
2845 : {
2846 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2847 0 : visibilitymap_pin(relation, block, &vmbuffer);
2848 0 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2849 : }
2850 :
2851 2985848 : result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
2852 :
2853 2985848 : if (result == TM_Invisible)
2854 : {
2855 0 : UnlockReleaseBuffer(buffer);
2856 0 : ereport(ERROR,
2857 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2858 : errmsg("attempted to delete invisible tuple")));
2859 : }
2860 2985848 : else if (result == TM_BeingModified && wait)
2861 : {
2862 : TransactionId xwait;
2863 : uint16 infomask;
2864 :
2865 : /* must copy state data before unlocking buffer */
2866 81110 : xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
2867 81110 : infomask = tp.t_data->t_infomask;
2868 :
2869 : /*
2870 : * Sleep until concurrent transaction ends -- except when there's a
2871 : * single locker and it's our own transaction. Note we don't care
2872 : * which lock mode the locker has, because we need the strongest one.
2873 : *
2874 : * Before sleeping, we need to acquire tuple lock to establish our
2875 : * priority for the tuple (see heap_lock_tuple). LockTuple will
2876 : * release us when we are next-in-line for the tuple.
2877 : *
2878 : * If we are forced to "start over" below, we keep the tuple lock;
2879 : * this arranges that we stay at the head of the line while rechecking
2880 : * tuple state.
2881 : */
2882 81110 : if (infomask & HEAP_XMAX_IS_MULTI)
2883 : {
2884 16 : bool current_is_member = false;
2885 :
2886 16 : if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
2887 :                                          LockTupleExclusive, &current_is_member))
2888 : {
2889 16 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2890 :
2891 : /*
2892 : * Acquire the lock, if necessary (but skip it when we're
2893 : * requesting a lock and already have one; avoids deadlock).
2894 : */
2895 16 : if (!current_is_member)
2896 12 : heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
2897 : LockWaitBlock, &have_tuple_lock);
2898 :
2899 : /* wait for multixact */
2900 16 : MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
2901 : relation, &(tp.t_self), XLTW_Delete,
2902 : NULL);
2903 16 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2904 :
2905 : /*
2906 : * If xwait had just locked the tuple then some other xact
2907 : * could update this tuple before we get to this point. Check
2908 : * for xmax change, and start over if so.
2909 : *
2910 : * We also must start over if we didn't pin the VM page, and
2911 : * the page has become all visible.
2912 : */
2913 32 : if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
2914 32 : xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2915 16 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
2916 : xwait))
2917 0 : goto l1;
2918 : }
2919 :
2920 : /*
2921 : * You might think the multixact is necessarily done here, but not
2922 : * so: it could have surviving members, namely our own xact or
2923 : * other subxacts of this backend. It is legal for us to delete
2924 : * the tuple in either case, however (the latter case is
2925 : * essentially a situation of upgrading our former shared lock to
2926 : * exclusive). We don't bother changing the on-disk hint bits
2927 : * since we are about to overwrite the xmax altogether.
2928 : */
2929 : }
2930 81094 : else if (!TransactionIdIsCurrentTransactionId(xwait))
2931 : {
2932 : /*
2933 : * Wait for regular transaction to end; but first, acquire tuple
2934 : * lock.
2935 : */
2936 100 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2937 100 : heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
2938 : LockWaitBlock, &have_tuple_lock);
2939 100 : XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
2940 92 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2941 :
2942 : /*
2943 : * xwait is done, but if xwait had just locked the tuple then some
2944 : * other xact could update this tuple before we get to this point.
2945 : * Check for xmax change, and start over if so.
2946 : *
2947 : * We also must start over if we didn't pin the VM page, and the
2948 : * page has become all visible.
2949 : */
2950 184 : if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
2951 182 : xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2952 90 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
2953 : xwait))
2954 2 : goto l1;
2955 :
2956 : /* Otherwise check if it committed or aborted */
2957 90 : UpdateXmaxHintBits(tp.t_data, buffer, xwait);
2958 : }
2959 :
2960 : /*
2961 : * We may overwrite if previous xmax aborted, or if it committed but
2962 : * only locked the tuple without updating it.
2963 : */
2964 162160 : if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2965 81118 : HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
2966 58 : HeapTupleHeaderIsOnlyLocked(tp.t_data))
2967 81050 : result = TM_Ok;
2968 50 : else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
2969 42 : result = TM_Updated;
2970 : else
2971 8 : result = TM_Deleted;
2972 : }
2973 :
2974 : /* sanity check the result HeapTupleSatisfiesUpdate() and the logic above */
2975 : if (result != TM_Ok)
2976 : {
2977 : Assert(result == TM_SelfModified ||
2978 : result == TM_Updated ||
2979 : result == TM_Deleted ||
2980 : result == TM_BeingModified);
2981 : Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
2982 : Assert(result != TM_Updated ||
2983 : !ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid));
2984 : }
2985 :
2986 2985838 : if (crosscheck != InvalidSnapshot && result == TM_Ok)
2987 : {
2988 : /* Perform additional check for transaction-snapshot mode RI updates */
2989 2 : if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
2990 2 : result = TM_Updated;
2991 : }
2992 :
2993 2985838 : if (result != TM_Ok)
2994 : {
2995 120 : tmfd->ctid = tp.t_data->t_ctid;
2996 120 : tmfd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
2997 120 : if (result == TM_SelfModified)
2998 42 : tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
2999 : else
3000 78 : tmfd->cmax = InvalidCommandId;
3001 120 : UnlockReleaseBuffer(buffer);
3002 120 : if (have_tuple_lock)
3003 50 : UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3004 120 : if (vmbuffer != InvalidBuffer)
3005 0 : ReleaseBuffer(vmbuffer);
3006 120 : return result;
3007 : }
3008 :
3009 : /*
3010 : * We're about to do the actual delete -- check for conflict first, to
3011 : * avoid possibly having to roll back work we've just done.
3012 : *
3013 : * This is safe without a recheck as long as there is no possibility of
3014 : * another process scanning the page between this check and the delete
3015 : * being visible to the scan (i.e., an exclusive buffer content lock is
3016 : * continuously held from this point until the tuple delete is visible).
3017 : */
3018 2985718 : CheckForSerializableConflictIn(relation, tid, BufferGetBlockNumber(buffer));
3019 :
3020 : /* replace cid with a combo CID if necessary */
3021 2985690 : HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
3022 :
3023 : /*
3024 : * Compute replica identity tuple before entering the critical section so
3025 : * we don't PANIC upon a memory allocation failure.
3026 : */
3027 2985690 : old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
3028 :
3029 : /*
3030 : * If this is the first possibly-multixact-able operation in the current
3031 : * transaction, set my per-backend OldestMemberMXactId setting. We can be
3032 : * certain that the transaction will never become a member of any older
3033 : * MultiXactIds than that. (We have to do this even if we end up just
3034 : * using our own TransactionId below, since some other backend could
3035 : * incorporate our XID into a MultiXact immediately afterwards.)
3036 : */
3037 2985690 : MultiXactIdSetOldestMember();
3038 :
3039 2985690 : compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
3040 2985690 : tp.t_data->t_infomask, tp.t_data->t_infomask2,
3041 : xid, LockTupleExclusive, true,
3042 : &new_xmax, &new_infomask, &new_infomask2);
3043 :
3044 2985690 : START_CRIT_SECTION();
3045 :
3046 : /*
3047 : * If this transaction commits, the tuple will become DEAD sooner or
3048 : * later. Set flag that this page is a candidate for pruning once our xid
3049 : * falls below the OldestXmin horizon. If the transaction finally aborts,
3050 : * the subsequent page pruning will be a no-op and the hint will be
3051 : * cleared.
3052 : */
3053 2985690 : PageSetPrunable(page, xid);
3054 :
3055 2985690 : if (PageIsAllVisible(page))
3056 : {
3057 486 : all_visible_cleared = true;
3058 486 : PageClearAllVisible(page);
3059 486 : visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3060 : vmbuffer, VISIBILITYMAP_VALID_BITS);
3061 : }
3062 :
3063 : /* store transaction information of xact deleting the tuple */
3064 2985690 : tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3065 2985690 : tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3066 2985690 : tp.t_data->t_infomask |= new_infomask;
3067 2985690 : tp.t_data->t_infomask2 |= new_infomask2;
3068 2985690 : HeapTupleHeaderClearHotUpdated(tp.t_data);
3069 2985690 : HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
3070 2985690 : HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
3071 : /* Make sure there is no forward chain link in t_ctid */
3072 2985690 : tp.t_data->t_ctid = tp.t_self;
3073 :
3074 : /* Signal that this is actually a move into another partition */
3075 2985690 : if (changingPart)
3076 968 : HeapTupleHeaderSetMovedPartitions(tp.t_data);
3077 :
3078 2985690 : MarkBufferDirty(buffer);
3079 :
3080 : /*
3081 : * XLOG stuff
3082 : *
3083 : * NB: heap_abort_speculative() uses the same xlog record and replay
3084 : * routines.
3085 : */
3086 2985690 : if (RelationNeedsWAL(relation))
3087 : {
3088 : xl_heap_delete xlrec;
3089 : xl_heap_header xlhdr;
3090 : XLogRecPtr recptr;
3091 :
3092 : /*
3093 : * For logical decode we need combo CIDs to properly decode the
3094 : * catalog
3095 : */
3096 2860452 : if (RelationIsAccessibleInLogicalDecoding(relation))
3097 12526 : log_heap_new_cid(relation, &tp);
3098 :
3099 2860452 : xlrec.flags = 0;
3100 2860452 : if (all_visible_cleared)
3101 486 : xlrec.flags |= XLH_DELETE_ALL_VISIBLE_CLEARED;
3102 2860452 : if (changingPart)
3103 968 : xlrec.flags |= XLH_DELETE_IS_PARTITION_MOVE;
3104 5720904 : xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
3105 2860452 : tp.t_data->t_infomask2);
3106 2860452 : xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
3107 2860452 : xlrec.xmax = new_xmax;
3108 :
3109 2860452 : if (old_key_tuple != NULL)
3110 : {
3111 94038 : if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3112 264 : xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE;
3113 : else
3114 93774 : xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY;
3115 : }
3116 :
3117 2860452 : XLogBeginInsert();
3118 2860452 : XLogRegisterData(&xlrec, SizeOfHeapDelete);
3119 :
3120 2860452 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3121 :
3122 : /*
3123 : * Log replica identity of the deleted tuple if there is one
3124 : */
3125 2860452 : if (old_key_tuple != NULL)
3126 : {
3127 94038 : xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
3128 94038 : xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
3129 94038 : xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
3130 :
3131 94038 : XLogRegisterData(&xlhdr, SizeOfHeapHeader);
3132 94038 : XLogRegisterData((char *) old_key_tuple->t_data
3133 : + SizeofHeapTupleHeader,
3134 94038 : old_key_tuple->t_len
3135 : - SizeofHeapTupleHeader);
3136 : }
3137 :
3138 : /* filtering by origin on a row level is much more efficient */
3139 2860452 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
3140 :
3141 2860452 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
3142 :
3143 2860452 : PageSetLSN(page, recptr);
3144 : }
3145 :
3146 2985690 : END_CRIT_SECTION();
3147 :
3148 2985690 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3149 :
3150 2985690 : if (vmbuffer != InvalidBuffer)
3151 486 : ReleaseBuffer(vmbuffer);
3152 :
3153 : /*
3154 : * If the tuple has toasted out-of-line attributes, we need to delete
3155 : * those items too. We have to do this before releasing the buffer
3156 : * because we need to look at the contents of the tuple, but it's OK to
3157 : * release the content lock on the buffer first.
3158 : */
3159 2985690 : if (relation->rd_rel->relkind != RELKIND_RELATION &&
3160 5144 : relation->rd_rel->relkind != RELKIND_MATVIEW)
3161 : {
3162 : /* toast table entries should never be recursively toasted */
3163 : Assert(!HeapTupleHasExternal(&tp));
3164 : }
3165 2980566 : else if (HeapTupleHasExternal(&tp))
3166 598 : heap_toast_delete(relation, &tp, false);
3167 :
3168 : /*
3169 : * Mark tuple for invalidation from system caches at next command
3170 : * boundary. We have to do this before releasing the buffer because we
3171 : * need to look at the contents of the tuple.
3172 : */
3173 2985690 : CacheInvalidateHeapTuple(relation, &tp, NULL);
3174 :
3175 : /* Now we can release the buffer */
3176 2985690 : ReleaseBuffer(buffer);
3177 :
3178 : /*
3179 : * Release the lmgr tuple lock, if we had it.
3180 : */
3181 2985690 : if (have_tuple_lock)
3182 52 : UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3183 :
3184 2985690 : pgstat_count_heap_delete(relation);
3185 :
3186 2985690 : if (old_key_tuple != NULL && old_key_copied)
3187 93776 : heap_freetuple(old_key_tuple);
3188 :
3189 2985690 : return TM_Ok;
3190 : }
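/*
 * A minimal sketch of calling heap_delete() directly, mirroring the wrapper
 * below but keeping the failure data; "rel" and "tid" are hypothetical and
 * assumed valid.  On failure, tmfd carries the tuple's current ctid and xmax
 * (and cmax for TM_SelfModified), which a caller can use to follow the
 * update chain or report the conflict.
 *
 *		TM_FailureData tmfd;
 *		TM_Result	result;
 *
 *		result = heap_delete(rel, tid, GetCurrentCommandId(true),
 *							 InvalidSnapshot, true, &tmfd, false);
 *		if (result == TM_Updated)
 *			-- the newer version is at tmfd.ctid, last changed by tmfd.xmax
 */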
3191 :
3192 : /*
3193 : * simple_heap_delete - delete a tuple
3194 : *
3195 : * This routine may be used to delete a tuple when concurrent updates of
3196 : * the target tuple are not expected (for example, because we have a lock
3197 : * on the relation associated with the tuple). Any failure is reported
3198 : * via ereport().
3199 : */
3200 : void
3201 1253952 : simple_heap_delete(Relation relation, ItemPointer tid)
3202 : {
3203 : TM_Result result;
3204 : TM_FailureData tmfd;
3205 :
3206 1253952 : result = heap_delete(relation, tid,
3207 : GetCurrentCommandId(true), InvalidSnapshot,
3208 : true /* wait for commit */ ,
3209 : &tmfd, false /* changingPart */ );
3210 1253952 : switch (result)
3211 : {
3212 0 : case TM_SelfModified:
3213 : /* Tuple was already updated in current command? */
3214 0 : elog(ERROR, "tuple already updated by self");
3215 : break;
3216 :
3217 1253952 : case TM_Ok:
3218 : /* done successfully */
3219 1253952 : break;
3220 :
3221 0 : case TM_Updated:
3222 0 : elog(ERROR, "tuple concurrently updated");
3223 : break;
3224 :
3225 0 : case TM_Deleted:
3226 0 : elog(ERROR, "tuple concurrently deleted");
3227 : break;
3228 :
3229 0 : default:
3230 0 : elog(ERROR, "unrecognized heap_delete status: %u", result);
3231 : break;
3232 : }
3233 1253952 : }
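/*
 * A minimal usage sketch, assuming a hypothetical caller with an opened
 * Relation "rel" and a previously fetched HeapTuple "tuple", and holding a
 * lock strong enough to rule out concurrent updates of the target tuple:
 *
 *		simple_heap_delete(rel, &tuple->t_self);
 *		CommandCounterIncrement();	-- only if later commands in this
 *									-- transaction must see the deletion
 */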
3234 :
3235 : /*
3236 : * heap_update - replace a tuple
3237 : *
3238 : * See table_tuple_update() for an explanation of the parameters, except that
3239 : * this routine directly takes a tuple rather than a slot.
3240 : *
3241 : * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
3242 : * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
3243 : * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
3244 : * generated by another transaction).
3245 : */
3246 : TM_Result
3247 609684 : heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
3248 : CommandId cid, Snapshot crosscheck, bool wait,
3249 : TM_FailureData *tmfd, LockTupleMode *lockmode,
3250 : TU_UpdateIndexes *update_indexes)
3251 : {
3252 : TM_Result result;
3253 609684 : TransactionId xid = GetCurrentTransactionId();
3254 : Bitmapset *hot_attrs;
3255 : Bitmapset *sum_attrs;
3256 : Bitmapset *key_attrs;
3257 : Bitmapset *id_attrs;
3258 : Bitmapset *interesting_attrs;
3259 : Bitmapset *modified_attrs;
3260 : ItemId lp;
3261 : HeapTupleData oldtup;
3262 : HeapTuple heaptup;
3263 609684 : HeapTuple old_key_tuple = NULL;
3264 609684 : bool old_key_copied = false;
3265 : Page page;
3266 : BlockNumber block;
3267 : MultiXactStatus mxact_status;
3268 : Buffer buffer,
3269 : newbuf,
3270 609684 : vmbuffer = InvalidBuffer,
3271 609684 : vmbuffer_new = InvalidBuffer;
3272 : bool need_toast;
3273 : Size newtupsize,
3274 : pagefree;
3275 609684 : bool have_tuple_lock = false;
3276 : bool iscombo;
3277 609684 : bool use_hot_update = false;
3278 609684 : bool summarized_update = false;
3279 : bool key_intact;
3280 609684 : bool all_visible_cleared = false;
3281 609684 : bool all_visible_cleared_new = false;
3282 : bool checked_lockers;
3283 : bool locker_remains;
3284 609684 : bool id_has_external = false;
3285 : TransactionId xmax_new_tuple,
3286 : xmax_old_tuple;
3287 : uint16 infomask_old_tuple,
3288 : infomask2_old_tuple,
3289 : infomask_new_tuple,
3290 : infomask2_new_tuple;
3291 :
3292 : Assert(ItemPointerIsValid(otid));
3293 :
3294 : /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
3295 : Assert(HeapTupleHeaderGetNatts(newtup->t_data) <=
3296 : RelationGetNumberOfAttributes(relation));
3297 :
3298 609684 : AssertHasSnapshotForToast(relation);
3299 :
3300 : /*
3301 : * Forbid this during a parallel operation, lest it allocate a combo CID.
3302 : * Other workers might need that combo CID for visibility checks, and we
3303 : * have no provision for broadcasting it to them.
3304 : */
3305 609684 : if (IsInParallelMode())
3306 0 : ereport(ERROR,
3307 : (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
3308 : errmsg("cannot update tuples during a parallel operation")));
3309 :
3310 : #ifdef USE_ASSERT_CHECKING
3311 : check_lock_if_inplace_updateable_rel(relation, otid, newtup);
3312 : #endif
3313 :
3314 : /*
3315 : * Fetch the list of attributes to be checked for various operations.
3316 : *
3317 : * For HOT considerations, this is wasted effort if we fail to update or
3318 : * have to put the new tuple on a different page. But we must compute the
3319 : * list before obtaining buffer lock --- in the worst case, if we are
3320 : * doing an update on one of the relevant system catalogs, we could
3321 : * deadlock if we try to fetch the list later. In any case, the relcache
3322 : * caches the data so this is usually pretty cheap.
3323 : *
3324 : * We also need columns used by the replica identity and columns that are
3325 : * considered the "key" of rows in the table.
3326 : *
3327 : * Note that we get copies of each bitmap, so we need not worry about
3328 : * relcache flush happening midway through.
3329 : */
3330 609684 : hot_attrs = RelationGetIndexAttrBitmap(relation,
3331 : INDEX_ATTR_BITMAP_HOT_BLOCKING);
3332 609684 : sum_attrs = RelationGetIndexAttrBitmap(relation,
3333 : INDEX_ATTR_BITMAP_SUMMARIZED);
3334 609684 : key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY);
3335 609684 : id_attrs = RelationGetIndexAttrBitmap(relation,
3336 : INDEX_ATTR_BITMAP_IDENTITY_KEY);
3337 609684 : interesting_attrs = NULL;
3338 609684 : interesting_attrs = bms_add_members(interesting_attrs, hot_attrs);
3339 609684 : interesting_attrs = bms_add_members(interesting_attrs, sum_attrs);
3340 609684 : interesting_attrs = bms_add_members(interesting_attrs, key_attrs);
3341 609684 : interesting_attrs = bms_add_members(interesting_attrs, id_attrs);
3342 :
3343 609684 : block = ItemPointerGetBlockNumber(otid);
3344 609684 : INJECTION_POINT("heap_update-before-pin", NULL);
3345 609684 : buffer = ReadBuffer(relation, block);
3346 609684 : page = BufferGetPage(buffer);
3347 :
3348 : /*
3349 : * Before locking the buffer, pin the visibility map page if it appears to
3350 : * be necessary. Since we haven't got the lock yet, someone else might be
3351 : * in the middle of changing this, so we'll need to recheck after we have
3352 : * the lock.
3353 : */
3354 609684 : if (PageIsAllVisible(page))
3355 3068 : visibilitymap_pin(relation, block, &vmbuffer);
3356 :
3357 609684 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3358 :
3359 609684 : lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
3360 :
3361 : /*
3362 : * Usually, a buffer pin and/or snapshot blocks pruning of otid, ensuring
3363 : * we see LP_NORMAL here. When the otid origin is a syscache, we may have
3364 : * neither a pin nor a snapshot. Hence, we may see other LP_ states, each
3365 : * of which indicates concurrent pruning.
3366 : *
3367 : * Failing with TM_Updated would be most accurate. However, unlike other
3368 : * TM_Updated scenarios, we don't know the successor ctid in LP_UNUSED and
3369 : * LP_DEAD cases. While the distinction between TM_Updated and TM_Deleted
3370 : * does matter to SQL statements UPDATE and MERGE, those SQL statements
3371 : * hold a snapshot that ensures LP_NORMAL. Hence, the choice between
3372 : * TM_Updated and TM_Deleted affects only the wording of error messages.
3373 : * Settle on TM_Deleted, for two reasons. First, it avoids complicating
3374 : * the specification of when tmfd->ctid is valid. Second, it creates
3375 : * error log evidence that we took this branch.
3376 : *
3377 : * Since it's possible to see LP_UNUSED at otid, it's also possible to see
3378 : * LP_NORMAL for a tuple that replaced LP_UNUSED. If it's a tuple for an
3379 : * unrelated row, we'll fail with "duplicate key value violates unique".
3380 : * XXX if otid is the live, newer version of the newtup row, we'll discard
3381 : * changes originating in versions of this catalog row after the version
3382 : * the caller got from syscache. See syscache-update-pruned.spec.
3383 : */
3384 609684 : if (!ItemIdIsNormal(lp))
3385 : {
3386 : Assert(RelationSupportsSysCache(RelationGetRelid(relation)));
3387 :
3388 2 : UnlockReleaseBuffer(buffer);
3389 : Assert(!have_tuple_lock);
3390 2 : if (vmbuffer != InvalidBuffer)
3391 2 : ReleaseBuffer(vmbuffer);
3392 2 : tmfd->ctid = *otid;
3393 2 : tmfd->xmax = InvalidTransactionId;
3394 2 : tmfd->cmax = InvalidCommandId;
3395 2 : *update_indexes = TU_None;
3396 :
3397 2 : bms_free(hot_attrs);
3398 2 : bms_free(sum_attrs);
3399 2 : bms_free(key_attrs);
3400 2 : bms_free(id_attrs);
3401 : /* modified_attrs not yet initialized */
3402 2 : bms_free(interesting_attrs);
3403 2 : return TM_Deleted;
3404 : }
3405 :
3406 : /*
3407 : * Fill in enough data in oldtup for HeapDetermineColumnsInfo to work
3408 : * properly.
3409 : */
3410 609682 : oldtup.t_tableOid = RelationGetRelid(relation);
3411 609682 : oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3412 609682 : oldtup.t_len = ItemIdGetLength(lp);
3413 609682 : oldtup.t_self = *otid;
3414 :
3415 : /* the new tuple is ready, except for this: */
3416 609682 : newtup->t_tableOid = RelationGetRelid(relation);
3417 :
3418 : /*
3419 : * Determine columns modified by the update. Additionally, identify
3420 : * whether any of the unmodified replica identity key attributes in the
3421 : * old tuple is externally stored or not. This is required because for
3422 : * such attributes the flattened value won't be WAL logged as part of the
3423 : * new tuple so we must include it as part of the old_key_tuple. See
3424 : * ExtractReplicaIdentity.
3425 : */
3426 609682 : modified_attrs = HeapDetermineColumnsInfo(relation, interesting_attrs,
3427 : id_attrs, &oldtup,
3428 : newtup, &id_has_external);
3429 :
3430 : /*
3431 : * If we're not updating any "key" column, we can grab a weaker lock type.
3432 : * This allows for more concurrency when we are running simultaneously
3433 : * with foreign key checks.
3434 : *
3435 : * Note that if a column gets detoasted while executing the update, but
3436 : * the value ends up being the same, this test will fail and we will use
3437 : * the stronger lock. This is acceptable; the important case to optimize
3438 : * is updates that don't manipulate key columns, not those that
3439 : * serendipitously arrive at the same key values.
3440 : */
3441 609682 : if (!bms_overlap(modified_attrs, key_attrs))
3442 : {
3443 601396 : *lockmode = LockTupleNoKeyExclusive;
3444 601396 : mxact_status = MultiXactStatusNoKeyUpdate;
3445 601396 : key_intact = true;
3446 :
3447 : /*
3448 : * If this is the first possibly-multixact-able operation in the
3449 : * current transaction, set my per-backend OldestMemberMXactId
3450 : * setting. We can be certain that the transaction will never become a
3451 : * member of any older MultiXactIds than that. (We have to do this
3452 : * even if we end up just using our own TransactionId below, since
3453 : * some other backend could incorporate our XID into a MultiXact
3454 : * immediately afterwards.)
3455 : */
3456 601396 : MultiXactIdSetOldestMember();
3457 : }
3458 : else
3459 : {
3460 8286 : *lockmode = LockTupleExclusive;
3461 8286 : mxact_status = MultiXactStatusUpdate;
3462 8286 : key_intact = false;
3463 : }
3464 :
3465 : /*
3466 : * Note: beyond this point, use oldtup not otid to refer to old tuple.
3467 : * otid may very well point at newtup->t_self, which we will overwrite
3468 : * with the new tuple's location, so there's great risk of confusion if we
3469 : * use otid anymore.
3470 : */
3471 :
3472 609682 : l2:
3473 609684 : checked_lockers = false;
3474 609684 : locker_remains = false;
3475 609684 : result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
3476 :
3477 : /* see below about the "no wait" case */
3478 : Assert(result != TM_BeingModified || wait);
3479 :
3480 609684 : if (result == TM_Invisible)
3481 : {
3482 0 : UnlockReleaseBuffer(buffer);
3483 0 : ereport(ERROR,
3484 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
3485 : errmsg("attempted to update invisible tuple")));
3486 : }
3487 609684 : else if (result == TM_BeingModified && wait)
3488 : {
3489 : TransactionId xwait;
3490 : uint16 infomask;
3491 71908 : bool can_continue = false;
3492 :
3493 : /*
3494 : * XXX note that we don't consider the "no wait" case here. This
3495 : * isn't a problem currently because no caller uses that case, but it
3496 : * should be fixed if such a caller is introduced. It wasn't a
3497 : * problem previously because this code would always wait, but now
3498 : * that some tuple locks do not conflict with one of the lock modes we
3499 : * use, it is possible that this case is interesting to handle
3500 : * specially.
3501 : *
3502 : * This may cause failures with third-party code that calls
3503 : * heap_update directly.
3504 : */
3505 :
3506 : /* must copy state data before unlocking buffer */
3507 71908 : xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
3508 71908 : infomask = oldtup.t_data->t_infomask;
3509 :
3510 : /*
3511 : * Now we have to do something about the existing locker. If it's a
3512 : * multi, sleep on it; we might be awakened before it is completely
3513 : * gone (or even not sleep at all in some cases); we need to preserve
3514 : * it as locker, unless it is gone completely.
3515 : *
3516 : * If it's not a multi, we need to check for sleeping conditions
3517 : * before actually going to sleep. If the update doesn't conflict
3518 : * with the locks, we just continue without sleeping (but making sure
3519 : * it is preserved).
3520 : *
3521 : * Before sleeping, we need to acquire tuple lock to establish our
3522 : * priority for the tuple (see heap_lock_tuple). LockTuple will
3523 : * release us when we are next-in-line for the tuple. Note we must
3524 : * not acquire the tuple lock until we're sure we're going to sleep;
3525 : * otherwise we're open for race conditions with other transactions
3526 : * holding the tuple lock which sleep on us.
3527 : *
3528 : * If we are forced to "start over" below, we keep the tuple lock;
3529 : * this arranges that we stay at the head of the line while rechecking
3530 : * tuple state.
3531 : */
3532 71908 : if (infomask & HEAP_XMAX_IS_MULTI)
3533 : {
3534 : TransactionId update_xact;
3535 : int remain;
3536 120 : bool current_is_member = false;
3537 :
3538 120 : if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
3539 : *lockmode, &current_is_member))
3540 : {
3541 16 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3542 :
3543 : /*
3544 : * Acquire the lock, if necessary (but skip it when we're
3545 : * requesting a lock and already have one; avoids deadlock).
3546 : */
3547 16 : if (!current_is_member)
3548 0 : heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3549 : LockWaitBlock, &have_tuple_lock);
3550 :
3551 : /* wait for multixact */
3552 16 : MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
3553 : relation, &oldtup.t_self, XLTW_Update,
3554 : &remain);
3555 16 : checked_lockers = true;
3556 16 : locker_remains = remain != 0;
3557 16 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3558 :
3559 : /*
3560 : * If xwait had just locked the tuple then some other xact
3561 : * could update this tuple before we get to this point. Check
3562 : * for xmax change, and start over if so.
3563 : */
3564 16 : if (xmax_infomask_changed(oldtup.t_data->t_infomask,
3565 16 : infomask) ||
3566 16 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3567 : xwait))
3568 0 : goto l2;
3569 : }
3570 :
3571 : /*
3572 : * Note that the multixact may not be done by now. It could have
3573 : * surviving members; our own xact or other subxacts of this
3574 : * backend, and also any other concurrent transaction that locked
3575 : * the tuple with LockTupleKeyShare if we only got
3576 : * LockTupleNoKeyExclusive. If this is the case, we have to be
3577 : * careful to mark the updated tuple with the surviving members in
3578 : * Xmax.
3579 : *
3580 : * Note that there could have been another update in the
3581 : * MultiXact. In that case, we need to check whether it committed
3582 : * or aborted. If it aborted we are safe to update it again;
3583 : * otherwise there is an update conflict, and we have to return
3584 : * TableTuple{Deleted, Updated} below.
3585 : *
3586 : * In the LockTupleExclusive case, we still need to preserve the
3587 : * surviving members: those would include the tuple locks we had
3588 : * before this one, which are important to keep in case this
3589 : * subxact aborts.
3590 : */
3591 120 : if (!HEAP_XMAX_IS_LOCKED_ONLY(oldtup.t_data->t_infomask))
3592 16 : update_xact = HeapTupleGetUpdateXid(oldtup.t_data);
3593 : else
3594 104 : update_xact = InvalidTransactionId;
3595 :
3596 : /*
3597 : * There was no UPDATE in the MultiXact; or it aborted. No
3598 : * TransactionIdIsInProgress() call needed here, since we called
3599 : * MultiXactIdWait() above.
3600 : */
3601 136 : if (!TransactionIdIsValid(update_xact) ||
3602 16 : TransactionIdDidAbort(update_xact))
3603 106 : can_continue = true;
3604 : }
3605 71788 : else if (TransactionIdIsCurrentTransactionId(xwait))
3606 : {
3607 : /*
3608 : * The only locker is ourselves; we can avoid grabbing the tuple
3609 : * lock here, but must preserve our locking information.
3610 : */
3611 71572 : checked_lockers = true;
3612 71572 : locker_remains = true;
3613 71572 : can_continue = true;
3614 : }
3615 216 : else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
3616 : {
3617 : /*
3618 : * If it's just a key-share locker, and we're not changing the key
3619 : * columns, we don't need to wait for it to end; but we need to
3620 : * preserve it as locker.
3621 : */
3622 58 : checked_lockers = true;
3623 58 : locker_remains = true;
3624 58 : can_continue = true;
3625 : }
3626 : else
3627 : {
3628 : /*
3629 : * Wait for regular transaction to end; but first, acquire tuple
3630 : * lock.
3631 : */
3632 158 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3633 158 : heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3634 : LockWaitBlock, &have_tuple_lock);
3635 158 : XactLockTableWait(xwait, relation, &oldtup.t_self,
3636 : XLTW_Update);
3637 158 : checked_lockers = true;
3638 158 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3639 :
3640 : /*
3641 : * xwait is done, but if xwait had just locked the tuple then some
3642 : * other xact could update this tuple before we get to this point.
3643 : * Check for xmax change, and start over if so.
3644 : */
3645 314 : if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
3646 156 : !TransactionIdEquals(xwait,
3647 : HeapTupleHeaderGetRawXmax(oldtup.t_data)))
3648 2 : goto l2;
3649 :
3650 : /* Otherwise check if it committed or aborted */
3651 156 : UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
3652 156 : if (oldtup.t_data->t_infomask & HEAP_XMAX_INVALID)
3653 44 : can_continue = true;
3654 : }
3655 :
3656 71906 : if (can_continue)
3657 71780 : result = TM_Ok;
3658 126 : else if (!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid))
3659 116 : result = TM_Updated;
3660 : else
3661 10 : result = TM_Deleted;
3662 : }
3663 :
3664 : /* Sanity check the result of HeapTupleSatisfiesUpdate() and the logic above */
3665 : if (result != TM_Ok)
3666 : {
3667 : Assert(result == TM_SelfModified ||
3668 : result == TM_Updated ||
3669 : result == TM_Deleted ||
3670 : result == TM_BeingModified);
3671 : Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
3672 : Assert(result != TM_Updated ||
3673 : !ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
3674 : }
3675 :
3676 609682 : if (crosscheck != InvalidSnapshot && result == TM_Ok)
3677 : {
3678 : /* Perform additional check for transaction-snapshot mode RI updates */
3679 2 : if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
3680 2 : result = TM_Updated;
3681 : }
3682 :
3683 609682 : if (result != TM_Ok)
3684 : {
3685 320 : tmfd->ctid = oldtup.t_data->t_ctid;
3686 320 : tmfd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
3687 320 : if (result == TM_SelfModified)
3688 104 : tmfd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
3689 : else
3690 216 : tmfd->cmax = InvalidCommandId;
3691 320 : UnlockReleaseBuffer(buffer);
3692 320 : if (have_tuple_lock)
3693 112 : UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
3694 320 : if (vmbuffer != InvalidBuffer)
3695 0 : ReleaseBuffer(vmbuffer);
3696 320 : *update_indexes = TU_None;
3697 :
3698 320 : bms_free(hot_attrs);
3699 320 : bms_free(sum_attrs);
3700 320 : bms_free(key_attrs);
3701 320 : bms_free(id_attrs);
3702 320 : bms_free(modified_attrs);
3703 320 : bms_free(interesting_attrs);
3704 320 : return result;
3705 : }
3706 :
3707 : /*
3708 : * If we didn't pin the visibility map page and the page has become all
3709 : * visible while we were busy locking the buffer, or during some
3710 : * subsequent window during which we had it unlocked, we'll have to unlock
3711 : * and re-lock, to avoid holding the buffer lock across an I/O. That's a
3712 : * bit unfortunate, especially since we'll now have to recheck whether the
3713 : * tuple has been locked or updated under us, but hopefully it won't
3714 : * happen very often.
3715 : */
3716 609362 : if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3717 : {
3718 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3719 0 : visibilitymap_pin(relation, block, &vmbuffer);
3720 0 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3721 0 : goto l2;
3722 : }
3723 :
3724 : /* Fill in transaction status data */
3725 :
3726 : /*
3727 : * If the tuple we're updating is locked, we need to preserve the locking
3728 : * info in the old tuple's Xmax. Prepare a new Xmax value for this.
3729 : */
3730 609362 : compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3731 609362 : oldtup.t_data->t_infomask,
3732 609362 : oldtup.t_data->t_infomask2,
3733 : xid, *lockmode, true,
3734 : &xmax_old_tuple, &infomask_old_tuple,
3735 : &infomask2_old_tuple);
3736 :
3737 : /*
3738 : * And also prepare an Xmax value for the new copy of the tuple. If there
3739 : * was no xmax previously, or there was one but all lockers are now gone,
3740 : * then use InvalidTransactionId; otherwise, get the xmax from the old
3741 : * tuple. (In rare cases that might also be InvalidTransactionId and yet
3742 : * not have the HEAP_XMAX_INVALID bit set; that's fine.)
3743 : */
3744 681098 : if ((oldtup.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3745 143472 : HEAP_LOCKED_UPGRADED(oldtup.t_data->t_infomask) ||
3746 71632 : (checked_lockers && !locker_remains))
3747 537626 : xmax_new_tuple = InvalidTransactionId;
3748 : else
3749 71736 : xmax_new_tuple = HeapTupleHeaderGetRawXmax(oldtup.t_data);
3750 :
3751 609362 : if (!TransactionIdIsValid(xmax_new_tuple))
3752 : {
3753 537626 : infomask_new_tuple = HEAP_XMAX_INVALID;
3754 537626 : infomask2_new_tuple = 0;
3755 : }
3756 : else
3757 : {
3758 : /*
3759 : * If we found a valid Xmax for the new tuple, then the infomask bits
3760 : * to use on the new tuple depend on what was there on the old one.
3761 : * Note that since we're doing an update, the only possibility is that
3762 : * the lockers had FOR KEY SHARE lock.
3763 : */
3764 71736 : if (oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI)
3765 : {
3766 106 : GetMultiXactIdHintBits(xmax_new_tuple, &infomask_new_tuple,
3767 : &infomask2_new_tuple);
3768 : }
3769 : else
3770 : {
3771 71630 : infomask_new_tuple = HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_LOCK_ONLY;
3772 71630 : infomask2_new_tuple = 0;
3773 : }
3774 : }
3775 :
3776 : /*
3777 : * Prepare the new tuple with the appropriate initial values of Xmin and
3778 : * Xmax, as well as initial infomask bits as computed above.
3779 : */
3780 609362 : newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
3781 609362 : newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
3782 609362 : HeapTupleHeaderSetXmin(newtup->t_data, xid);
3783 609362 : HeapTupleHeaderSetCmin(newtup->t_data, cid);
3784 609362 : newtup->t_data->t_infomask |= HEAP_UPDATED | infomask_new_tuple;
3785 609362 : newtup->t_data->t_infomask2 |= infomask2_new_tuple;
3786 609362 : HeapTupleHeaderSetXmax(newtup->t_data, xmax_new_tuple);
3787 :
3788 : /*
3789 : * Replace cid with a combo CID if necessary. Note that we already put
3790 : * the plain cid into the new tuple.
3791 : */
3792 609362 : HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);
3793 :
3794 : /*
3795 : * If the toaster needs to be activated, OR if the new tuple will not fit
3796 : * on the same page as the old, then we need to release the content lock
3797 : * (but not the pin!) on the old tuple's buffer while we are off doing
3798 : * TOAST and/or table-file-extension work. We must mark the old tuple to
3799 : * show that it's locked, else other processes may try to update it
3800 : * themselves.
3801 : *
3802 : * We need to invoke the toaster if there are already any out-of-line
3803 : * toasted values present, or if the new tuple is over-threshold.
3804 : */
3805 609362 : if (relation->rd_rel->relkind != RELKIND_RELATION &&
3806 0 : relation->rd_rel->relkind != RELKIND_MATVIEW)
3807 : {
3808 : /* toast table entries should never be recursively toasted */
3809 : Assert(!HeapTupleHasExternal(&oldtup));
3810 : Assert(!HeapTupleHasExternal(newtup));
3811 0 : need_toast = false;
3812 : }
3813 : else
3814 1827342 : need_toast = (HeapTupleHasExternal(&oldtup) ||
3815 1217980 : HeapTupleHasExternal(newtup) ||
3816 608570 : newtup->t_len > TOAST_TUPLE_THRESHOLD);
3817 :
3818 609362 : pagefree = PageGetHeapFreeSpace(page);
3819 :
3820 609362 : newtupsize = MAXALIGN(newtup->t_len);
3821 :
3822 609362 : if (need_toast || newtupsize > pagefree)
3823 299868 : {
3824 : TransactionId xmax_lock_old_tuple;
3825 : uint16 infomask_lock_old_tuple,
3826 : infomask2_lock_old_tuple;
3827 299868 : bool cleared_all_frozen = false;
3828 :
3829 : /*
3830 : * To prevent concurrent sessions from updating the tuple, we have to
3831 : * temporarily mark it locked, while we release the page-level lock.
3832 : *
3833 : * To satisfy the rule that any xid potentially appearing in a buffer
3834 : * written out to disk must first be WAL-logged, we unfortunately have to
3835 : * WAL log this temporary modification. We can reuse xl_heap_lock for this
3836 : * purpose. If we crash/error before following through with the
3837 : * actual update, xmax will be of an aborted transaction, allowing
3838 : * other sessions to proceed.
3839 : */
3840 :
3841 : /*
3842 : * Compute xmax / infomask appropriate for locking the tuple. This has
3843 : * to be done separately from the combo that's going to be used for
3844 : * updating, because the potentially created multixact would otherwise
3845 : * be wrong.
3846 : */
3847 299868 : compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3848 299868 : oldtup.t_data->t_infomask,
3849 299868 : oldtup.t_data->t_infomask2,
3850 : xid, *lockmode, false,
3851 : &xmax_lock_old_tuple, &infomask_lock_old_tuple,
3852 : &infomask2_lock_old_tuple);
3853 :
3854 : Assert(HEAP_XMAX_IS_LOCKED_ONLY(infomask_lock_old_tuple));
3855 :
3856 299868 : START_CRIT_SECTION();
3857 :
3858 : /* Clear obsolete visibility flags ... */
3859 299868 : oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3860 299868 : oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3861 299868 : HeapTupleClearHotUpdated(&oldtup);
3862 : /* ... and store info about transaction updating this tuple */
3863 : Assert(TransactionIdIsValid(xmax_lock_old_tuple));
3864 299868 : HeapTupleHeaderSetXmax(oldtup.t_data, xmax_lock_old_tuple);
3865 299868 : oldtup.t_data->t_infomask |= infomask_lock_old_tuple;
3866 299868 : oldtup.t_data->t_infomask2 |= infomask2_lock_old_tuple;
3867 299868 : HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
3868 :
3869 : /* temporarily make it look not-updated, but locked */
3870 299868 : oldtup.t_data->t_ctid = oldtup.t_self;
3871 :
3872 : /*
3873 : * Clear all-frozen bit on visibility map if needed. We could
3874 : * immediately reset ALL_VISIBLE, but given that the WAL logging
3875 : * overhead would be unchanged, that doesn't seem necessarily
3876 : * worthwhile.
3877 : */
3878 301730 : if (PageIsAllVisible(page) &&
3879 1862 : visibilitymap_clear(relation, block, vmbuffer,
3880 : VISIBILITYMAP_ALL_FROZEN))
3881 1460 : cleared_all_frozen = true;
3882 :
3883 299868 : MarkBufferDirty(buffer);
3884 :
3885 299868 : if (RelationNeedsWAL(relation))
3886 : {
3887 : xl_heap_lock xlrec;
3888 : XLogRecPtr recptr;
3889 :
3890 279608 : XLogBeginInsert();
3891 279608 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3892 :
3893 279608 : xlrec.offnum = ItemPointerGetOffsetNumber(&oldtup.t_self);
3894 279608 : xlrec.xmax = xmax_lock_old_tuple;
3895 559216 : xlrec.infobits_set = compute_infobits(oldtup.t_data->t_infomask,
3896 279608 : oldtup.t_data->t_infomask2);
3897 279608 : xlrec.flags =
3898 279608 : cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
3899 279608 : XLogRegisterData(&xlrec, SizeOfHeapLock);
3900 279608 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
3901 279608 : PageSetLSN(page, recptr);
3902 : }
3903 :
3904 299868 : END_CRIT_SECTION();
3905 :
3906 299868 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3907 :
3908 : /*
3909 : * Let the toaster do its thing, if needed.
3910 : *
3911 : * Note: below this point, heaptup is the data we actually intend to
3912 : * store into the relation; newtup is the caller's original untoasted
3913 : * data.
3914 : */
3915 299868 : if (need_toast)
3916 : {
3917 : /* Note we always use WAL and FSM during updates */
3918 3294 : heaptup = heap_toast_insert_or_update(relation, newtup, &oldtup, 0);
3919 3294 : newtupsize = MAXALIGN(heaptup->t_len);
3920 : }
3921 : else
3922 296574 : heaptup = newtup;
3923 :
3924 : /*
3925 : * Now, do we need a new page for the tuple, or not? This is a bit
3926 : * tricky since someone else could have added tuples to the page while
3927 : * we weren't looking. We have to recheck the available space after
3928 : * reacquiring the buffer lock. But don't bother to do that if the
3929 : * former amount of free space is still not enough; it's unlikely
3930 : * there's more free now than before.
3931 : *
3932 : * What's more, if we need to get a new page, we will need to acquire
3933 : * buffer locks on both old and new pages. To avoid deadlock against
3934 : * some other backend trying to get the same two locks in the other
3935 : * order, we must be consistent about the order we get the locks in.
3936 : * We use the rule "lock the lower-numbered page of the relation
3937 : * first". To implement this, we must do RelationGetBufferForTuple
3938 : * while not holding the lock on the old page, and we must rely on it
3939 : * to get the locks on both pages in the correct order.
3940 : *
3941 : * Another consideration is that we need visibility map page pin(s) if
3942 : * we will have to clear the all-visible flag on either page. If we
3943 : * call RelationGetBufferForTuple, we rely on it to acquire any such
3944 : * pins; but if we don't, we have to handle that here. Hence we need
3945 : * a loop.
3946 : */
3947 : for (;;)
3948 : {
3949 299870 : if (newtupsize > pagefree)
3950 : {
3951 : /* It doesn't fit, must use RelationGetBufferForTuple. */
3952 298768 : newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
3953 : buffer, 0, NULL,
3954 : &vmbuffer_new, &vmbuffer,
3955 : 0);
3956 : /* We're all done. */
3957 298768 : break;
3958 : }
3959 : /* Acquire VM page pin if needed and we don't have it. */
3960 1102 : if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3961 0 : visibilitymap_pin(relation, block, &vmbuffer);
3962 : /* Re-acquire the lock on the old tuple's page. */
3963 1102 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3964 : /* Re-check using the up-to-date free space */
3965 1102 : pagefree = PageGetHeapFreeSpace(page);
3966 1102 : if (newtupsize > pagefree ||
3967 1100 : (vmbuffer == InvalidBuffer && PageIsAllVisible(page)))
3968 : {
3969 : /*
3970 : * Rats, it doesn't fit anymore, or somebody just now set the
3971 : * all-visible flag. We must now unlock and loop to avoid
3972 : * deadlock. Fortunately, this path should seldom be taken.
3973 : */
3974 2 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3975 : }
3976 : else
3977 : {
3978 : /* We're all done. */
3979 1100 : newbuf = buffer;
3980 1100 : break;
3981 : }
3982 : }
3983 : }
3984 : else
3985 : {
3986 : /* No TOAST work needed, and it'll fit on same page */
3987 309494 : newbuf = buffer;
3988 309494 : heaptup = newtup;
3989 : }
3990 :
3991 : /*
3992 : * We're about to do the actual update -- check for conflict first, to
3993 : * avoid possibly having to roll back work we've just done.
3994 : *
3995 : * This is safe without a recheck as long as there is no possibility of
3996 : * another process scanning the pages between this check and the update
3997 : * being visible to the scan (i.e., exclusive buffer content lock(s) are
3998 : * continuously held from this point until the tuple update is visible).
3999 : *
4000 : * For the new tuple the only check needed is at the relation level, but
4001 : * since both tuples are in the same relation and the check for oldtup
4002 : * will include checking the relation level, there is no benefit to a
4003 : * separate check for the new tuple.
4004 : */
4005 609362 : CheckForSerializableConflictIn(relation, &oldtup.t_self,
4006 : BufferGetBlockNumber(buffer));
4007 :
4008 : /*
4009 : * At this point newbuf and buffer are both pinned and locked, and newbuf
4010 : * has enough space for the new tuple. If they are the same buffer, only
4011 : * one pin is held.
4012 : */
4013 :
4014 609338 : if (newbuf == buffer)
4015 : {
4016 : /*
4017 : * Since the new tuple is going into the same page, we might be able
4018 : * to do a HOT update. Check if any of the index columns have been
4019 : * changed.
4020 : */
4021 310570 : if (!bms_overlap(modified_attrs, hot_attrs))
4022 : {
4023 286766 : use_hot_update = true;
4024 :
4025 : /*
4026 : * If none of the columns that are used in hot-blocking indexes
4027 : * were updated, we can apply HOT, but we do still need to check
4028 : * if we need to update the summarizing indexes, and update those
4029 : * indexes if the columns were updated, or we may fail to detect
4030 : * e.g. value bound changes in BRIN minmax indexes.
4031 : */
4032 286766 : if (bms_overlap(modified_attrs, sum_attrs))
4033 3282 : summarized_update = true;
4034 : }
4035 : }
4036 : else
4037 : {
4038 : /* Set a hint that the old page could use prune/defrag */
4039 298768 : PageSetFull(page);
4040 : }
4041 :
4042 : /*
4043 : * Compute replica identity tuple before entering the critical section so
4044 : * we don't PANIC upon a memory allocation failure.
4045 : * ExtractReplicaIdentity() will return NULL if nothing needs to be
4046 : * logged. Pass old key required as true only if the replica identity key
4047 : * columns are modified or it has external data.
4048 : */
4049 609338 : old_key_tuple = ExtractReplicaIdentity(relation, &oldtup,
4050 609338 : bms_overlap(modified_attrs, id_attrs) ||
4051 : id_has_external,
4052 : &old_key_copied);
4053 :
4054 : /* NO EREPORT(ERROR) from here till changes are logged */
4055 609338 : START_CRIT_SECTION();
4056 :
4057 : /*
4058 : * If this transaction commits, the old tuple will become DEAD sooner or
4059 : * later. Set flag that this page is a candidate for pruning once our xid
4060 : * falls below the OldestXmin horizon. If the transaction finally aborts,
4061 : * the subsequent page pruning will be a no-op and the hint will be
4062 : * cleared.
4063 : *
4064 : * XXX Should we set hint on newbuf as well? If the transaction aborts,
4065 : * there would be a prunable tuple in the newbuf; but for now we choose
4066 : * not to optimize for aborts. Note that heap_xlog_update must be kept in
4067 : * sync if this decision changes.
4068 : */
4069 609338 : PageSetPrunable(page, xid);
4070 :
4071 609338 : if (use_hot_update)
4072 : {
4073 : /* Mark the old tuple as HOT-updated */
4074 286766 : HeapTupleSetHotUpdated(&oldtup);
4075 : /* And mark the new tuple as heap-only */
4076 286766 : HeapTupleSetHeapOnly(heaptup);
4077 : /* Mark the caller's copy too, in case different from heaptup */
4078 286766 : HeapTupleSetHeapOnly(newtup);
4079 : }
4080 : else
4081 : {
4082 : /* Make sure tuples are correctly marked as not-HOT */
4083 322572 : HeapTupleClearHotUpdated(&oldtup);
4084 322572 : HeapTupleClearHeapOnly(heaptup);
4085 322572 : HeapTupleClearHeapOnly(newtup);
4086 : }
4087 :
4088 609338 : RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
4089 :
4090 :
4091 : /* Clear obsolete visibility flags, possibly set by ourselves above... */
4092 609338 : oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
4093 609338 : oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
4094 : /* ... and store info about transaction updating this tuple */
4095 : Assert(TransactionIdIsValid(xmax_old_tuple));
4096 609338 : HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
4097 609338 : oldtup.t_data->t_infomask |= infomask_old_tuple;
4098 609338 : oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
4099 609338 : HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
4100 :
4101 : /* record address of new tuple in t_ctid of old one */
4102 609338 : oldtup.t_data->t_ctid = heaptup->t_self;
4103 :
4104 : /* clear PD_ALL_VISIBLE flags, reset all visibilitymap bits */
4105 609338 : if (PageIsAllVisible(BufferGetPage(buffer)))
4106 : {
4107 3066 : all_visible_cleared = true;
4108 3066 : PageClearAllVisible(BufferGetPage(buffer));
4109 3066 : visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
4110 : vmbuffer, VISIBILITYMAP_VALID_BITS);
4111 : }
4112 609338 : if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
4113 : {
4114 1780 : all_visible_cleared_new = true;
4115 1780 : PageClearAllVisible(BufferGetPage(newbuf));
4116 1780 : visibilitymap_clear(relation, BufferGetBlockNumber(newbuf),
4117 : vmbuffer_new, VISIBILITYMAP_VALID_BITS);
4118 : }
4119 :
4120 609338 : if (newbuf != buffer)
4121 298768 : MarkBufferDirty(newbuf);
4122 609338 : MarkBufferDirty(buffer);
4123 :
4124 : /* XLOG stuff */
4125 609338 : if (RelationNeedsWAL(relation))
4126 : {
4127 : XLogRecPtr recptr;
4128 :
4129 : /*
4130 : * For logical decoding we need combo CIDs to properly decode the
4131 : * catalog.
4132 : */
4133 586614 : if (RelationIsAccessibleInLogicalDecoding(relation))
4134 : {
4135 5112 : log_heap_new_cid(relation, &oldtup);
4136 5112 : log_heap_new_cid(relation, heaptup);
4137 : }
4138 :
4139 586614 : recptr = log_heap_update(relation, buffer,
4140 : newbuf, &oldtup, heaptup,
4141 : old_key_tuple,
4142 : all_visible_cleared,
4143 : all_visible_cleared_new);
4144 586614 : if (newbuf != buffer)
4145 : {
4146 278520 : PageSetLSN(BufferGetPage(newbuf), recptr);
4147 : }
4148 586614 : PageSetLSN(BufferGetPage(buffer), recptr);
4149 : }
4150 :
4151 609338 : END_CRIT_SECTION();
4152 :
4153 609338 : if (newbuf != buffer)
4154 298768 : LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
4155 609338 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4156 :
4157 : /*
4158 : * Mark old tuple for invalidation from system caches at next command
4159 : * boundary, and mark the new tuple for invalidation in case we abort. We
4160 : * have to do this before releasing the buffer because oldtup is in the
4161 : * buffer. (heaptup is all in local memory, but it's necessary to process
4162 : * both tuple versions in one call to inval.c so we can avoid redundant
4163 : * sinval messages.)
4164 : */
4165 609338 : CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
4166 :
4167 : /* Now we can release the buffer(s) */
4168 609338 : if (newbuf != buffer)
4169 298768 : ReleaseBuffer(newbuf);
4170 609338 : ReleaseBuffer(buffer);
4171 609338 : if (BufferIsValid(vmbuffer_new))
4172 1780 : ReleaseBuffer(vmbuffer_new);
4173 609338 : if (BufferIsValid(vmbuffer))
4174 3066 : ReleaseBuffer(vmbuffer);
4175 :
4176 : /*
4177 : * Release the lmgr tuple lock, if we had it.
4178 : */
4179 609338 : if (have_tuple_lock)
4180 44 : UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
4181 :
4182 609338 : pgstat_count_heap_update(relation, use_hot_update, newbuf != buffer);
4183 :
4184 : /*
4185 : * If heaptup is a private copy, release it. Don't forget to copy t_self
4186 : * back to the caller's image, too.
4187 : */
4188 609338 : if (heaptup != newtup)
4189 : {
4190 3198 : newtup->t_self = heaptup->t_self;
4191 3198 : heap_freetuple(heaptup);
4192 : }
4193 :
4194 : /*
4195 : * If it is a HOT update, the update may still need to update summarized
4196 : * indexes, lest we fail to update those summaries and get incorrect
4197 : * results (for example, minmax bounds of the block may change with this
4198 : * update).
4199 : */
4200 609338 : if (use_hot_update)
4201 : {
4202 286766 : if (summarized_update)
4203 3282 : *update_indexes = TU_Summarizing;
4204 : else
4205 283484 : *update_indexes = TU_None;
4206 : }
4207 : else
4208 322572 : *update_indexes = TU_All;
4209 :
4210 609338 : if (old_key_tuple != NULL && old_key_copied)
4211 168 : heap_freetuple(old_key_tuple);
4212 :
4213 609338 : bms_free(hot_attrs);
4214 609338 : bms_free(sum_attrs);
4215 609338 : bms_free(key_attrs);
4216 609338 : bms_free(id_attrs);
4217 609338 : bms_free(modified_attrs);
4218 609338 : bms_free(interesting_attrs);
4219 :
4220 609338 : return TM_Ok;
4221 : }
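/*
 * A minimal sketch of how a caller might act on the TU_UpdateIndexes value
 * reported by heap_update() above; "rel", "otid" and "newtup" are
 * hypothetical and assumed to be prepared already.  TU_None means no index
 * maintenance is needed (plain HOT update), TU_Summarizing means only
 * summarizing indexes (e.g. BRIN) need new entries, and TU_All means every
 * index needs an entry for the new tuple version.
 *
 *		TM_Result	result;
 *		TM_FailureData tmfd;
 *		LockTupleMode lockmode;
 *		TU_UpdateIndexes update_indexes;
 *
 *		result = heap_update(rel, otid, newtup, GetCurrentCommandId(true),
 *							 InvalidSnapshot, true, &tmfd, &lockmode,
 *							 &update_indexes);
 *		if (result == TM_Ok && update_indexes != TU_None)
 *			-- insert index entries for newtup, restricted to summarizing
 *			-- indexes when update_indexes == TU_Summarizing
 */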
4222 :
4223 : #ifdef USE_ASSERT_CHECKING
4224 : /*
4225 : * Confirm adequate lock held during heap_update(), per rules from
4226 : * README.tuplock section "Locking to write inplace-updated tables".
4227 : */
4228 : static void
4229 : check_lock_if_inplace_updateable_rel(Relation relation,
4230 : ItemPointer otid,
4231 : HeapTuple newtup)
4232 : {
4233 : /* LOCKTAG_TUPLE acceptable for any catalog */
4234 : switch (RelationGetRelid(relation))
4235 : {
4236 : case RelationRelationId:
4237 : case DatabaseRelationId:
4238 : {
4239 : LOCKTAG tuptag;
4240 :
4241 : SET_LOCKTAG_TUPLE(tuptag,
4242 : relation->rd_lockInfo.lockRelId.dbId,
4243 : relation->rd_lockInfo.lockRelId.relId,
4244 : ItemPointerGetBlockNumber(otid),
4245 : ItemPointerGetOffsetNumber(otid));
4246 : if (LockHeldByMe(&tuptag, InplaceUpdateTupleLock, false))
4247 : return;
4248 : }
4249 : break;
4250 : default:
4251 : Assert(!IsInplaceUpdateRelation(relation));
4252 : return;
4253 : }
4254 :
4255 : switch (RelationGetRelid(relation))
4256 : {
4257 : case RelationRelationId:
4258 : {
4259 : /* LOCKTAG_TUPLE or LOCKTAG_RELATION ok */
4260 : Form_pg_class classForm = (Form_pg_class) GETSTRUCT(newtup);
4261 : Oid relid = classForm->oid;
4262 : Oid dbid;
4263 : LOCKTAG tag;
4264 :
4265 : if (IsSharedRelation(relid))
4266 : dbid = InvalidOid;
4267 : else
4268 : dbid = MyDatabaseId;
4269 :
4270 : if (classForm->relkind == RELKIND_INDEX)
4271 : {
4272 : Relation irel = index_open(relid, AccessShareLock);
4273 :
4274 : SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
4275 : index_close(irel, AccessShareLock);
4276 : }
4277 : else
4278 : SET_LOCKTAG_RELATION(tag, dbid, relid);
4279 :
4280 : if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, false) &&
4281 : !LockHeldByMe(&tag, ShareRowExclusiveLock, true))
4282 : elog(WARNING,
4283 : "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
4284 : NameStr(classForm->relname),
4285 : relid,
4286 : classForm->relkind,
4287 : ItemPointerGetBlockNumber(otid),
4288 : ItemPointerGetOffsetNumber(otid));
4289 : }
4290 : break;
4291 : case DatabaseRelationId:
4292 : {
4293 : /* LOCKTAG_TUPLE required */
4294 : Form_pg_database dbForm = (Form_pg_database) GETSTRUCT(newtup);
4295 :
4296 : elog(WARNING,
4297 : "missing lock on database \"%s\" (OID %u) @ TID (%u,%u)",
4298 : NameStr(dbForm->datname),
4299 : dbForm->oid,
4300 : ItemPointerGetBlockNumber(otid),
4301 : ItemPointerGetOffsetNumber(otid));
4302 : }
4303 : break;
4304 : }
4305 : }
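/*
 * A sketch of how a caller can satisfy the check above when updating a
 * pg_class or pg_database row the ordinary (non-inplace) way, assuming the
 * generic lmgr tuple-lock API; "relation" and "otid" are the arguments that
 * would later be passed to heap_update():
 *
 *		LockTuple(relation, otid, InplaceUpdateTupleLock);
 *		... heap_update(relation, otid, newtup, ...) ...
 *		UnlockTuple(relation, otid, InplaceUpdateTupleLock);
 */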
4306 :
4307 : /*
4308 : * Confirm adequate relation lock held, per rules from README.tuplock section
4309 : * "Locking to write inplace-updated tables".
4310 : */
4311 : static void
4312 : check_inplace_rel_lock(HeapTuple oldtup)
4313 : {
4314 : Form_pg_class classForm = (Form_pg_class) GETSTRUCT(oldtup);
4315 : Oid relid = classForm->oid;
4316 : Oid dbid;
4317 : LOCKTAG tag;
4318 :
4319 : if (IsSharedRelation(relid))
4320 : dbid = InvalidOid;
4321 : else
4322 : dbid = MyDatabaseId;
4323 :
4324 : if (classForm->relkind == RELKIND_INDEX)
4325 : {
4326 : Relation irel = index_open(relid, AccessShareLock);
4327 :
4328 : SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
4329 : index_close(irel, AccessShareLock);
4330 : }
4331 : else
4332 : SET_LOCKTAG_RELATION(tag, dbid, relid);
4333 :
4334 : if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, true))
4335 : elog(WARNING,
4336 : "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
4337 : NameStr(classForm->relname),
4338 : relid,
4339 : classForm->relkind,
4340 : ItemPointerGetBlockNumber(&oldtup->t_self),
4341 : ItemPointerGetOffsetNumber(&oldtup->t_self));
4342 : }
4343 : #endif
4344 :
4345 : /*
4346 : * Check if the specified attribute's values are the same. Subroutine for
4347 : * HeapDetermineColumnsInfo.
4348 : */
4349 : static bool
4350 1474614 : heap_attr_equals(TupleDesc tupdesc, int attrnum, Datum value1, Datum value2,
4351 : bool isnull1, bool isnull2)
4352 : {
4353 : /*
4354 : * If one value is NULL and the other is not, then they are certainly not
4355 : * equal
4356 : */
4357 1474614 : if (isnull1 != isnull2)
4358 90 : return false;
4359 :
4360 : /*
4361 : * If both are NULL, they can be considered equal.
4362 : */
4363 1474524 : if (isnull1)
4364 9982 : return true;
4365 :
4366 : /*
4367 : * We do simple binary comparison of the two datums. This may be overly
4368 : * strict because there can be multiple binary representations for the
4369 : * same logical value. But we should be OK as long as there are no false
4370 : * positives. Using a type-specific equality operator is messy because
4371 : * there could be multiple notions of equality in different operator
4372 : * classes; furthermore, we cannot safely invoke user-defined functions
4373 : * while holding exclusive buffer lock.
4374 : */
4375 1464542 : if (attrnum <= 0)
4376 : {
4377 : /* The only allowed system columns are OIDs, so do this */
4378 0 : return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
4379 : }
4380 : else
4381 : {
4382 : CompactAttribute *att;
4383 :
4384 : Assert(attrnum <= tupdesc->natts);
4385 1464542 : att = TupleDescCompactAttr(tupdesc, attrnum - 1);
4386 1464542 : return datumIsEqual(value1, value2, att->attbyval, att->attlen);
4387 : }
4388 : }
4389 :
4390 : /*
4391 : * Check which columns are being updated.
4392 : *
4393 : * Given an updated tuple, determine (and return into the output bitmapset),
4394 : * from those listed as interesting, the set of columns that changed.
4395 : *
4396 : * has_external indicates if any of the unmodified attributes (from those
4397 : * listed as interesting) of the old tuple is a member of external_cols and is
4398 : * stored externally.
4399 : */
4400 : static Bitmapset *
4401 609682 : HeapDetermineColumnsInfo(Relation relation,
4402 : Bitmapset *interesting_cols,
4403 : Bitmapset *external_cols,
4404 : HeapTuple oldtup, HeapTuple newtup,
4405 : bool *has_external)
4406 : {
4407 : int attidx;
4408 609682 : Bitmapset *modified = NULL;
4409 609682 : TupleDesc tupdesc = RelationGetDescr(relation);
4410 :
4411 609682 : attidx = -1;
4412 2084296 : while ((attidx = bms_next_member(interesting_cols, attidx)) >= 0)
4413 : {
4414 : /* attidx is zero-based, attrnum is the normal attribute number */
4415 1474614 : AttrNumber attrnum = attidx + FirstLowInvalidHeapAttributeNumber;
4416 : Datum value1,
4417 : value2;
4418 : bool isnull1,
4419 : isnull2;
4420 :
4421 : /*
4422 : * If it's a whole-tuple reference, say "not equal". It's not really
4423 : * worth supporting this case, since it could only succeed after a
4424 : * no-op update, which is hardly a case worth optimizing for.
4425 : */
4426 1474614 : if (attrnum == 0)
4427 : {
4428 0 : modified = bms_add_member(modified, attidx);
4429 1410276 : continue;
4430 : }
4431 :
4432 : /*
4433 : * Likewise, automatically say "not equal" for any system attribute
4434 : * other than tableOID; we cannot expect these to be consistent in a
4435 : * HOT chain, or even to be set correctly yet in the new tuple.
4436 : */
4437 1474614 : if (attrnum < 0)
4438 : {
4439 0 : if (attrnum != TableOidAttributeNumber)
4440 : {
4441 0 : modified = bms_add_member(modified, attidx);
4442 0 : continue;
4443 : }
4444 : }
4445 :
4446 : /*
4447 : * Extract the corresponding values. XXX this is pretty inefficient
4448 : * if there are many indexed columns. Should we do a single
4449 : * heap_deform_tuple call on each tuple, instead? But that doesn't
4450 : * work for system columns ...
4451 : */
4452 1474614 : value1 = heap_getattr(oldtup, attrnum, tupdesc, &isnull1);
4453 1474614 : value2 = heap_getattr(newtup, attrnum, tupdesc, &isnull2);
4454 :
4455 1474614 : if (!heap_attr_equals(tupdesc, attrnum, value1,
4456 : value2, isnull1, isnull2))
4457 : {
4458 53460 : modified = bms_add_member(modified, attidx);
4459 53460 : continue;
4460 : }
4461 :
4462 : /*
4463 : * No need to check attributes that can't be stored externally. Note
4464 : * that system attributes can't be stored externally.
4465 : */
4466 1421154 : if (attrnum < 0 || isnull1 ||
4467 1411172 : TupleDescCompactAttr(tupdesc, attrnum - 1)->attlen != -1)
4468 1356816 : continue;
4469 :
4470 : /*
4471 : * Check if the old tuple's attribute is stored externally and is a
4472 : * member of external_cols.
4473 : */
4474 64348 : if (VARATT_IS_EXTERNAL((struct varlena *) DatumGetPointer(value1)) &&
4475 10 : bms_is_member(attidx, external_cols))
4476 4 : *has_external = true;
4477 : }
4478 :
4479 609682 : return modified;
4480 : }
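/*
 * Members of the returned bitmapset are attidx values, i.e. attribute
 * numbers offset by FirstLowInvalidHeapAttributeNumber.  A sketch of mapping
 * them back to ordinary AttrNumbers in a hypothetical consumer:
 *
 *		int			attidx = -1;
 *
 *		while ((attidx = bms_next_member(modified_attrs, attidx)) >= 0)
 *		{
 *			AttrNumber	attrnum = attidx + FirstLowInvalidHeapAttributeNumber;
 *
 *			... attrnum is now a normal attribute number ...
 *		}
 */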
4481 :
4482 : /*
4483 : * simple_heap_update - replace a tuple
4484 : *
4485 : * This routine may be used to update a tuple when concurrent updates of
4486 : * the target tuple are not expected (for example, because we have a lock
4487 : * on the relation associated with the tuple). Any failure is reported
4488 : * via ereport().
4489 : */
4490 : void
4491 222518 : simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup,
4492 : TU_UpdateIndexes *update_indexes)
4493 : {
4494 : TM_Result result;
4495 : TM_FailureData tmfd;
4496 : LockTupleMode lockmode;
4497 :
4498 222518 : result = heap_update(relation, otid, tup,
4499 : GetCurrentCommandId(true), InvalidSnapshot,
4500 : true /* wait for commit */ ,
4501 : &tmfd, &lockmode, update_indexes);
4502 222518 : switch (result)
4503 : {
4504 0 : case TM_SelfModified:
4505 : /* Tuple was already updated in current command? */
4506 0 : elog(ERROR, "tuple already updated by self");
4507 : break;
4508 :
4509 222516 : case TM_Ok:
4510 : /* done successfully */
4511 222516 : break;
4512 :
4513 0 : case TM_Updated:
4514 0 : elog(ERROR, "tuple concurrently updated");
4515 : break;
4516 :
4517 2 : case TM_Deleted:
4518 2 : elog(ERROR, "tuple concurrently deleted");
4519 : break;
4520 :
4521 0 : default:
4522 0 : elog(ERROR, "unrecognized heap_update status: %u", result);
4523 : break;
4524 : }
4525 222516 : }
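/*
 * [Editorial sketch, not part of heapam.c] A minimal illustration of the
 * intended calling pattern: build a replacement tuple with
 * heap_modify_tuple() and hand it to simple_heap_update(), identifying the
 * old version by its TID.  The helper name and the assumption that the
 * first attribute is an int4 column are hypothetical.
 */
static void
example_simple_update(Relation rel, HeapTuple oldtup)
{
	TupleDesc	tupdesc = RelationGetDescr(rel);
	int			natts = tupdesc->natts;
	Datum	   *values = palloc0(natts * sizeof(Datum));
	bool	   *isnull = palloc0(natts * sizeof(bool));
	bool	   *replace = palloc0(natts * sizeof(bool));
	HeapTuple	newtup;
	TU_UpdateIndexes update_indexes;

	values[0] = Int32GetDatum(42);	/* assumed int4 first column */
	replace[0] = true;				/* leave all other columns alone */

	newtup = heap_modify_tuple(oldtup, tupdesc, values, isnull, replace);
	simple_heap_update(rel, &oldtup->t_self, newtup, &update_indexes);
	/* caller remains responsible for index maintenance, as usual */
}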
4526 :
4527 :
4528 : /*
4529 : * Return the MultiXactStatus corresponding to the given tuple lock mode.
4530 : */
4531 : static MultiXactStatus
4532 2412 : get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
4533 : {
4534 : int retval;
4535 :
4536 2412 : if (is_update)
4537 192 : retval = tupleLockExtraInfo[mode].updstatus;
4538 : else
4539 2220 : retval = tupleLockExtraInfo[mode].lockstatus;
4540 :
4541 2412 : if (retval == -1)
4542 0 : elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4543 : is_update ? "true" : "false");
4544 :
4545 2412 : return (MultiXactStatus) retval;
4546 : }
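/*
 * [Editorial sketch, not part of heapam.c] With the mode-to-status mapping
 * encoded in tupleLockExtraInfo, a plain row share lock translates to the
 * "for share" member status, while an update that changes key columns
 * translates to the full update status.  A tiny hypothetical check of that
 * assumption:
 */
static void
example_mxact_status_mapping(void)
{
	Assert(get_mxact_status_for_lock(LockTupleShare, false) ==
		   MultiXactStatusForShare);
	Assert(get_mxact_status_for_lock(LockTupleExclusive, true) ==
		   MultiXactStatusUpdate);
}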
4547 :
4548 : /*
4549 : * heap_lock_tuple - lock a tuple in shared or exclusive mode
4550 : *
4551 : * Note that this acquires a buffer pin, which the caller must release.
4552 : *
4553 : * Input parameters:
4554 : * relation: relation containing tuple (caller must hold suitable lock)
4555 : * cid: current command ID (used for visibility test, and stored into
4556 : * tuple's cmax if lock is successful)
4557 : * mode: indicates if shared or exclusive tuple lock is desired
4558 : * wait_policy: what to do if tuple lock is not available
4559 : * follow_updates: if true, follow the update chain to also lock descendant
4560 : * tuples.
4561 : *
4562 : * Output parameters:
4563 : * *tuple: all fields filled in
4564 : * *buffer: set to buffer holding tuple (pinned but not locked at exit)
4565 : * *tmfd: filled in failure cases (see below)
4566 : *
4567 : * Function results are the same as the ones for table_tuple_lock().
4568 : *
4569 : * In the failure cases other than TM_Invisible, the routine fills
4570 : * *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
4571 : * if necessary), and t_cmax (the last only for TM_SelfModified,
4572 : * since we cannot obtain cmax from a combo CID generated by another
4573 : * transaction).
4574 : * See comments for struct TM_FailureData for additional info.
4575 : *
4576 : * See README.tuplock for a thorough explanation of this mechanism.
4577 : */
4578 : TM_Result
4579 169956 : heap_lock_tuple(Relation relation, HeapTuple tuple,
4580 : CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
4581 : bool follow_updates,
4582 : Buffer *buffer, TM_FailureData *tmfd)
4583 : {
4584 : TM_Result result;
4585 169956 : ItemPointer tid = &(tuple->t_self);
4586 : ItemId lp;
4587 : Page page;
4588 169956 : Buffer vmbuffer = InvalidBuffer;
4589 : BlockNumber block;
4590 : TransactionId xid,
4591 : xmax;
4592 : uint16 old_infomask,
4593 : new_infomask,
4594 : new_infomask2;
4595 169956 : bool first_time = true;
4596 169956 : bool skip_tuple_lock = false;
4597 169956 : bool have_tuple_lock = false;
4598 169956 : bool cleared_all_frozen = false;
4599 :
4600 169956 : *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
4601 169956 : block = ItemPointerGetBlockNumber(tid);
4602 :
4603 : /*
4604 : * Before locking the buffer, pin the visibility map page if it appears to
4605 : * be necessary. Since we haven't got the lock yet, someone else might be
4606 : * in the middle of changing this, so we'll need to recheck after we have
4607 : * the lock.
4608 : */
4609 169956 : if (PageIsAllVisible(BufferGetPage(*buffer)))
4610 3324 : visibilitymap_pin(relation, block, &vmbuffer);
4611 :
4612 169956 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4613 :
4614 169956 : page = BufferGetPage(*buffer);
4615 169956 : lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4616 : Assert(ItemIdIsNormal(lp));
4617 :
4618 169956 : tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4619 169956 : tuple->t_len = ItemIdGetLength(lp);
4620 169956 : tuple->t_tableOid = RelationGetRelid(relation);
4621 :
4622 28 : l3:
4623 169984 : result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4624 :
4625 169984 : if (result == TM_Invisible)
4626 : {
4627 : /*
4628 : * This is possible, but only when locking a tuple for ON CONFLICT
4629 : * UPDATE. We return this value here rather than throwing an error in
4630 : * order to give that case the opportunity to throw a more specific
4631 : * error.
4632 : */
4633 24 : result = TM_Invisible;
4634 24 : goto out_locked;
4635 : }
4636 169960 : else if (result == TM_BeingModified ||
4637 154160 : result == TM_Updated ||
4638 : result == TM_Deleted)
4639 : {
4640 : TransactionId xwait;
4641 : uint16 infomask;
4642 : uint16 infomask2;
4643 : bool require_sleep;
4644 : ItemPointerData t_ctid;
4645 :
4646 : /* must copy state data before unlocking buffer */
4647 15802 : xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4648 15802 : infomask = tuple->t_data->t_infomask;
4649 15802 : infomask2 = tuple->t_data->t_infomask2;
4650 15802 : ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4651 :
4652 15802 : LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4653 :
4654 : /*
4655 : * If any subtransaction of the current top transaction already holds
4656 : * a lock as strong as or stronger than what we're requesting, we
4657 : * effectively hold the desired lock already. We *must* succeed
4658 : * without trying to take the tuple lock, else we will deadlock
4659 : * against anyone wanting to acquire a stronger lock.
4660 : *
4661 : * Note we only do this the first time we loop on the HTSU result;
4662 : * there is no point in testing in subsequent passes, because
4663 : * evidently our own transaction cannot have acquired a new lock after
4664 : * the first time we checked.
4665 : */
4666 15802 : if (first_time)
4667 : {
4668 15784 : first_time = false;
4669 :
4670 15784 : if (infomask & HEAP_XMAX_IS_MULTI)
4671 : {
4672 : int i;
4673 : int nmembers;
4674 : MultiXactMember *members;
4675 :
4676 : /*
4677 : * We don't need to allow old multixacts here; if that had
4678 : * been the case, HeapTupleSatisfiesUpdate would have returned
4679 : * TM_Ok and we wouldn't be here.
4680 : */
4681 : nmembers =
4682 170 : GetMultiXactIdMembers(xwait, &members, false,
4683 170 : HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4684 :
4685 510 : for (i = 0; i < nmembers; i++)
4686 : {
4687 : /* only consider members of our own transaction */
4688 368 : if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4689 270 : continue;
4690 :
4691 98 : if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4692 : {
4693 28 : pfree(members);
4694 28 : result = TM_Ok;
4695 28 : goto out_unlocked;
4696 : }
4697 : else
4698 : {
4699 : /*
4700 : * Disable acquisition of the heavyweight tuple lock.
4701 : * Otherwise, when promoting a weaker lock, we might
4702 : * deadlock with another locker that has acquired the
4703 : * heavyweight tuple lock and is waiting for our
4704 : * transaction to finish.
4705 : *
4706 : * Note that in this case we still need to wait for
4707 : * the multixact if required, to avoid acquiring
4708 : * conflicting locks.
4709 : */
4710 70 : skip_tuple_lock = true;
4711 : }
4712 : }
4713 :
4714 142 : if (members)
4715 142 : pfree(members);
4716 : }
4717 15614 : else if (TransactionIdIsCurrentTransactionId(xwait))
4718 : {
4719 13106 : switch (mode)
4720 : {
4721 334 : case LockTupleKeyShare:
4722 : Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
4723 : HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4724 : HEAP_XMAX_IS_EXCL_LOCKED(infomask));
4725 334 : result = TM_Ok;
4726 334 : goto out_unlocked;
4727 40 : case LockTupleShare:
4728 52 : if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4729 12 : HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4730 : {
4731 28 : result = TM_Ok;
4732 28 : goto out_unlocked;
4733 : }
4734 12 : break;
4735 130 : case LockTupleNoKeyExclusive:
4736 130 : if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4737 : {
4738 106 : result = TM_Ok;
4739 106 : goto out_unlocked;
4740 : }
4741 24 : break;
4742 12602 : case LockTupleExclusive:
4743 12602 : if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
4744 2522 : infomask2 & HEAP_KEYS_UPDATED)
4745 : {
4746 2480 : result = TM_Ok;
4747 2480 : goto out_unlocked;
4748 : }
4749 10122 : break;
4750 : }
4751 : }
4752 : }
4753 :
4754 : /*
4755 : * Initially assume that we will have to wait for the locking
4756 : * transaction(s) to finish. We check various cases below in which
4757 : * this can be turned off.
4758 : */
4759 12826 : require_sleep = true;
4760 12826 : if (mode == LockTupleKeyShare)
4761 : {
4762 : /*
4763 : * If we're requesting KeyShare, and there's no update present, we
4764 : * don't need to wait. Even if there is an update, we can still
4765 : * continue if the key hasn't been modified.
4766 : *
4767 : * However, if there are updates, we need to walk the update chain
4768 : * to mark future versions of the row as locked, too. That way,
4769 : * if somebody deletes that future version, we're protected
4770 : * against the key going away. This locking of future versions
4771 : * could block momentarily, if a concurrent transaction is
4772 : * deleting a key; or it could return a value to the effect that
4773 : * the transaction deleting the key has already committed. So we
4774 : * do this before re-locking the buffer; otherwise this would be
4775 : * prone to deadlocks.
4776 : *
4777 : * Note that the TID we're locking was grabbed before we unlocked
4778 : * the buffer. For it to change while we're not looking, the
4779 : * other properties we're testing for below after re-locking the
4780 : * buffer would also change, in which case we would restart this
4781 : * loop above.
4782 : */
4783 1194 : if (!(infomask2 & HEAP_KEYS_UPDATED))
4784 : {
4785 : bool updated;
4786 :
4787 1108 : updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4788 :
4789 : /*
4790 : * If there are updates, follow the update chain; bail out if
4791 : * that cannot be done.
4792 : */
4793 1108 : if (follow_updates && updated)
4794 : {
4795 : TM_Result res;
4796 :
4797 100 : res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4798 : GetCurrentTransactionId(),
4799 : mode);
4800 100 : if (res != TM_Ok)
4801 : {
4802 12 : result = res;
4803 : /* recovery code expects to have buffer lock held */
4804 12 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4805 384 : goto failed;
4806 : }
4807 : }
4808 :
4809 1096 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4810 :
4811 : /*
4812 : * Make sure it's still an appropriate lock, else start over.
4813 : * Also, if it wasn't updated before we released the lock, but
4814 : * is updated now, we start over too; the reason is that we
4815 : * now need to follow the update chain to lock the new
4816 : * versions.
4817 : */
4818 1096 : if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4819 86 : ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4820 86 : !updated))
4821 28 : goto l3;
4822 :
4823 : /* Things look okay, so we can skip sleeping */
4824 1096 : require_sleep = false;
4825 :
4826 : /*
4827 : * Note we allow Xmax to change here; other updaters/lockers
4828 : * could have modified it before we grabbed the buffer lock.
4829 : * However, this is not a problem, because with the recheck we
4830 : * just did we ensure that they still don't conflict with the
4831 : * lock we want.
4832 : */
4833 : }
4834 : }
4835 11632 : else if (mode == LockTupleShare)
4836 : {
4837 : /*
4838 : * If we're requesting Share, we can similarly avoid sleeping if
4839 : * there's no update and no exclusive lock present.
4840 : */
4841 884 : if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4842 884 : !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4843 : {
4844 872 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4845 :
4846 : /*
4847 : * Make sure it's still an appropriate lock, else start over.
4848 : * See above about allowing xmax to change.
4849 : */
4850 1744 : if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4851 872 : HEAP_XMAX_IS_EXCL_LOCKED(tuple->t_data->t_infomask))
4852 0 : goto l3;
4853 872 : require_sleep = false;
4854 : }
4855 : }
4856 10748 : else if (mode == LockTupleNoKeyExclusive)
4857 : {
4858 : /*
4859 : * If we're requesting NoKeyExclusive, we might also be able to
4860 : * avoid sleeping; just ensure that there no conflicting lock
4861 : * avoid sleeping; just ensure that there is no conflicting lock
4862 : */
4863 330 : if (infomask & HEAP_XMAX_IS_MULTI)
4864 : {
4865 52 : if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
4866 : mode, NULL))
4867 : {
4868 : /*
4869 : * No conflict, but if the xmax changed under us in the
4870 : * meantime, start over.
4871 : */
4872 26 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4873 52 : if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4874 26 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4875 : xwait))
4876 0 : goto l3;
4877 :
4878 : /* otherwise, we're good */
4879 26 : require_sleep = false;
4880 : }
4881 : }
4882 278 : else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4883 : {
4884 36 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4885 :
4886 : /* if the xmax changed in the meantime, start over */
4887 72 : if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4888 36 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4889 : xwait))
4890 0 : goto l3;
4891 : /* otherwise, we're good */
4892 36 : require_sleep = false;
4893 : }
4894 : }
4895 :
4896 : /*
4897 : * As a check independent from those above, we can also avoid sleeping
4898 : * if the current transaction is the sole locker of the tuple. Note
4899 : * that the strength of the lock already held is irrelevant; this is
4900 : * not about recording the lock in Xmax (which will be done regardless
4901 : * of this optimization, below). Also, note that the cases where we
4902 : * hold a lock stronger than we are requesting are already handled
4903 : * above by not doing anything.
4904 : *
4905 : * Note we only deal with the non-multixact case here; MultiXactIdWait
4906 : * is well equipped to deal with this situation on its own.
4907 : */
4908 23516 : if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
4909 10702 : TransactionIdIsCurrentTransactionId(xwait))
4910 : {
4911 : /* ... but if the xmax changed in the meantime, start over */
4912 10122 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4913 20244 : if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4914 10122 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4915 : xwait))
4916 0 : goto l3;
4917 : Assert(HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask));
4918 10122 : require_sleep = false;
4919 : }
4920 :
4921 : /*
4922 : * Time to sleep on the other transaction/multixact, if necessary.
4923 : *
4924 : * If the other transaction is an update/delete that's already
4925 : * committed, then sleeping cannot possibly do any good: if we're
4926 : * required to sleep, get out to raise an error instead.
4927 : *
4928 : * By here, we either have already acquired the buffer exclusive lock,
4929 : * or we must wait for the locking transaction or multixact; so below
4930 : * we ensure that we grab buffer lock after the sleep.
4931 : */
4932 12814 : if (require_sleep && (result == TM_Updated || result == TM_Deleted))
4933 : {
4934 296 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4935 296 : goto failed;
4936 : }
4937 12518 : else if (require_sleep)
4938 : {
4939 : /*
4940 : * Acquire tuple lock to establish our priority for the tuple, or
4941 : * die trying. LockTuple will release us when we are next-in-line
4942 : * for the tuple. We must do this even if we are share-locking,
4943 : * but not if we already have a weaker lock on the tuple.
4944 : *
4945 : * If we are forced to "start over" below, we keep the tuple lock;
4946 : * this arranges that we stay at the head of the line while
4947 : * rechecking tuple state.
4948 : */
4949 366 : if (!skip_tuple_lock &&
4950 334 : !heap_acquire_tuplock(relation, tid, mode, wait_policy,
4951 : &have_tuple_lock))
4952 : {
4953 : /*
4954 : * This can only happen if wait_policy is Skip and the lock
4955 : * couldn't be obtained.
4956 : */
4957 2 : result = TM_WouldBlock;
4958 : /* recovery code expects to have buffer lock held */
4959 2 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4960 2 : goto failed;
4961 : }
4962 :
4963 362 : if (infomask & HEAP_XMAX_IS_MULTI)
4964 : {
4965 80 : MultiXactStatus status = get_mxact_status_for_lock(mode, false);
4966 :
4967 : /* We only ever lock tuples, never update them */
4968 80 : if (status >= MultiXactStatusNoKeyUpdate)
4969 0 : elog(ERROR, "invalid lock mode in heap_lock_tuple");
4970 :
4971 : /* wait for multixact to end, or die trying */
4972 80 : switch (wait_policy)
4973 : {
4974 72 : case LockWaitBlock:
4975 72 : MultiXactIdWait((MultiXactId) xwait, status, infomask,
4976 : relation, &tuple->t_self, XLTW_Lock, NULL);
4977 72 : break;
4978 4 : case LockWaitSkip:
4979 4 : if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
4980 : status, infomask, relation,
4981 : NULL, false))
4982 : {
4983 4 : result = TM_WouldBlock;
4984 : /* recovery code expects to have buffer lock held */
4985 4 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4986 4 : goto failed;
4987 : }
4988 0 : break;
4989 4 : case LockWaitError:
4990 4 : if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
4991 : status, infomask, relation,
4992 : NULL, log_lock_failures))
4993 4 : ereport(ERROR,
4994 : (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4995 : errmsg("could not obtain lock on row in relation \"%s\"",
4996 : RelationGetRelationName(relation))));
4997 :
4998 0 : break;
4999 : }
5000 :
5001 : /*
5002 : * Of course, the multixact might not be done here: if we're
5003 : * requesting a light lock mode, other transactions with light
5004 : * locks could still be alive, as well as locks owned by our
5005 : * own xact or other subxacts of this backend. We need to
5006 : * preserve the surviving MultiXact members. Note that it
5007 : * isn't absolutely necessary in the latter case, but doing so
5008 : * is simpler.
5009 : */
5010 : }
5011 : else
5012 : {
5013 : /* wait for regular transaction to end, or die trying */
5014 282 : switch (wait_policy)
5015 : {
5016 204 : case LockWaitBlock:
5017 204 : XactLockTableWait(xwait, relation, &tuple->t_self,
5018 : XLTW_Lock);
5019 204 : break;
5020 66 : case LockWaitSkip:
5021 66 : if (!ConditionalXactLockTableWait(xwait, false))
5022 : {
5023 66 : result = TM_WouldBlock;
5024 : /* recovery code expects to have buffer lock held */
5025 66 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5026 66 : goto failed;
5027 : }
5028 0 : break;
5029 12 : case LockWaitError:
5030 12 : if (!ConditionalXactLockTableWait(xwait, log_lock_failures))
5031 12 : ereport(ERROR,
5032 : (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5033 : errmsg("could not obtain lock on row in relation \"%s\"",
5034 : RelationGetRelationName(relation))));
5035 0 : break;
5036 : }
5037 : }
5038 :
5039 : /* if there are updates, follow the update chain */
5040 276 : if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
5041 : {
5042 : TM_Result res;
5043 :
5044 104 : res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
5045 : GetCurrentTransactionId(),
5046 : mode);
5047 104 : if (res != TM_Ok)
5048 : {
5049 4 : result = res;
5050 : /* recovery code expects to have buffer lock held */
5051 4 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5052 4 : goto failed;
5053 : }
5054 : }
5055 :
5056 272 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5057 :
5058 : /*
5059 : * xwait is done, but if xwait had just locked the tuple then some
5060 : * other xact could update this tuple before we get to this point.
5061 : * Check for xmax change, and start over if so.
5062 : */
5063 520 : if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
5064 248 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
5065 : xwait))
5066 28 : goto l3;
5067 :
5068 244 : if (!(infomask & HEAP_XMAX_IS_MULTI))
5069 : {
5070 : /*
5071 : * Otherwise check if it committed or aborted. Note we cannot
5072 : * be here if the tuple was only locked by somebody who didn't
5073 : * conflict with us; that would have been handled above. So
5074 : * that transaction must necessarily be gone by now. But
5075 : * don't check for this in the multixact case, because some
5076 : * locker transactions might still be running.
5077 : */
5078 182 : UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
5079 : }
5080 : }
5081 :
5082 : /* By here, we're certain that we hold buffer exclusive lock again */
5083 :
5084 : /*
5085 : * We may lock if previous xmax aborted, or if it committed but only
5086 : * locked the tuple without updating it; or if we didn't have to wait
5087 : * at all for whatever reason.
5088 : */
5089 12396 : if (!require_sleep ||
5090 428 : (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
5091 338 : HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
5092 154 : HeapTupleHeaderIsOnlyLocked(tuple->t_data))
5093 12254 : result = TM_Ok;
5094 142 : else if (!ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
5095 106 : result = TM_Updated;
5096 : else
5097 36 : result = TM_Deleted;
5098 : }
5099 :
5100 154158 : failed:
5101 166938 : if (result != TM_Ok)
5102 : {
5103 : Assert(result == TM_SelfModified || result == TM_Updated ||
5104 : result == TM_Deleted || result == TM_WouldBlock);
5105 :
5106 : /*
5107 : * When locking a tuple under LockWaitSkip semantics and we fail with
5108 : * TM_WouldBlock above, it's possible for concurrent transactions to
5109 : * release the lock and set HEAP_XMAX_INVALID in the meantime. So
5110 : * this assert is slightly different from the equivalent one in
5111 : * heap_delete and heap_update.
5112 : */
5113 : Assert((result == TM_WouldBlock) ||
5114 : !(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
5115 : Assert(result != TM_Updated ||
5116 : !ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid));
5117 538 : tmfd->ctid = tuple->t_data->t_ctid;
5118 538 : tmfd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
5119 538 : if (result == TM_SelfModified)
5120 12 : tmfd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
5121 : else
5122 526 : tmfd->cmax = InvalidCommandId;
5123 538 : goto out_locked;
5124 : }
5125 :
5126 : /*
5127 : * If we didn't pin the visibility map page and the page has become all
5128 : * visible while we were busy locking the buffer, or during some
5129 : * subsequent window during which we had it unlocked, we'll have to unlock
5130 : * and re-lock, to avoid holding the buffer lock across I/O. That's a bit
5131 : * unfortunate, especially since we'll now have to recheck whether the
5132 : * tuple has been locked or updated under us, but hopefully it won't
5133 : * happen very often.
5134 : */
5135 166400 : if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
5136 : {
5137 0 : LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
5138 0 : visibilitymap_pin(relation, block, &vmbuffer);
5139 0 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5140 0 : goto l3;
5141 : }
5142 :
5143 166400 : xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
5144 166400 : old_infomask = tuple->t_data->t_infomask;
5145 :
5146 : /*
5147 : * If this is the first possibly-multixact-able operation in the current
5148 : * transaction, set my per-backend OldestMemberMXactId setting. We can be
5149 : * certain that the transaction will never become a member of any older
5150 : * MultiXactIds than that. (We have to do this even if we end up just
5151 : * using our own TransactionId below, since some other backend could
5152 : * incorporate our XID into a MultiXact immediately afterwards.)
5153 : */
5154 166400 : MultiXactIdSetOldestMember();
5155 :
5156 : /*
5157 : * Compute the new xmax and infomask to store into the tuple. Note we do
5158 : * not modify the tuple just yet, because that would leave it in the wrong
5159 : * state if multixact.c elogs.
5160 : */
5161 166400 : compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
5162 : GetCurrentTransactionId(), mode, false,
5163 : &xid, &new_infomask, &new_infomask2);
5164 :
5165 166400 : START_CRIT_SECTION();
5166 :
5167 : /*
5168 : * Store transaction information of xact locking the tuple.
5169 : *
5170 : * Note: Cmax is meaningless in this context, so don't set it; this avoids
5171 : * possibly generating a useless combo CID. Moreover, if we're locking a
5172 : * previously updated tuple, it's important to preserve the Cmax.
5173 : *
5174 : * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
5175 : * we would break the HOT chain.
5176 : */
5177 166400 : tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
5178 166400 : tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5179 166400 : tuple->t_data->t_infomask |= new_infomask;
5180 166400 : tuple->t_data->t_infomask2 |= new_infomask2;
5181 166400 : if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5182 166322 : HeapTupleHeaderClearHotUpdated(tuple->t_data);
5183 166400 : HeapTupleHeaderSetXmax(tuple->t_data, xid);
5184 :
5185 : /*
5186 : * Make sure there is no forward chain link in t_ctid. Note that in the
5187 : * cases where the tuple has been updated, we must not overwrite t_ctid,
5188 : * because it was set by the updater. Moreover, if the tuple has been
5189 : * updated, we need to follow the update chain to lock the new versions of
5190 : * the tuple as well.
5191 : */
5192 166400 : if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5193 166322 : tuple->t_data->t_ctid = *tid;
5194 :
5195 : /* Clear only the all-frozen bit on visibility map if needed */
5196 169724 : if (PageIsAllVisible(page) &&
5197 3324 : visibilitymap_clear(relation, block, vmbuffer,
5198 : VISIBILITYMAP_ALL_FROZEN))
5199 28 : cleared_all_frozen = true;
5200 :
5201 :
5202 166400 : MarkBufferDirty(*buffer);
5203 :
5204 : /*
5205 : * XLOG stuff. You might think that we don't need an XLOG record because
5206 : * there is no state change worth restoring after a crash. You would be
5207 : * wrong however: we have just written either a TransactionId or a
5208 : * MultiXactId that may never have been seen on disk before, and we need
5209 : * to make sure that there are XLOG entries covering those ID numbers.
5210 : * Else the same IDs might be re-used after a crash, which would be
5211 : * disastrous if this page made it to disk before the crash. Essentially
5212 : * we have to enforce the WAL log-before-data rule even in this case.
5213 : * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
5214 : * entries for everything anyway.)
5215 : */
5216 166400 : if (RelationNeedsWAL(relation))
5217 : {
5218 : xl_heap_lock xlrec;
5219 : XLogRecPtr recptr;
5220 :
5221 165712 : XLogBeginInsert();
5222 165712 : XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD);
5223 :
5224 165712 : xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
5225 165712 : xlrec.xmax = xid;
5226 331424 : xlrec.infobits_set = compute_infobits(new_infomask,
5227 165712 : tuple->t_data->t_infomask2);
5228 165712 : xlrec.flags = cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
5229 165712 : XLogRegisterData(&xlrec, SizeOfHeapLock);
5230 :
5231 : /* we don't decode row locks at the moment, so no need to log the origin */
5232 :
5233 165712 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
5234 :
5235 165712 : PageSetLSN(page, recptr);
5236 : }
5237 :
5238 166400 : END_CRIT_SECTION();
5239 :
5240 166400 : result = TM_Ok;
5241 :
5242 166962 : out_locked:
5243 166962 : LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
5244 :
5245 169938 : out_unlocked:
5246 169938 : if (BufferIsValid(vmbuffer))
5247 3324 : ReleaseBuffer(vmbuffer);
5248 :
5249 : /*
5250 : * Don't update the visibility map here. Locking a tuple doesn't change
5251 : * visibility info.
5252 : */
5253 :
5254 : /*
5255 : * Now that we have successfully marked the tuple as locked, we can
5256 : * release the lmgr tuple lock, if we had it.
5257 : */
5258 169938 : if (have_tuple_lock)
5259 304 : UnlockTupleTuplock(relation, tid, mode);
5260 :
5261 169938 : return result;
5262 : }
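/*
 * [Editorial sketch, not part of heapam.c] A hypothetical caller of
 * heap_lock_tuple(): set t_self to the target TID, pass the current command
 * ID, and remember that on return the buffer is pinned but not locked, so
 * the pin must be released here.  Handling of the non-TM_Ok results is left
 * to the caller's needs.
 */
static bool
example_lock_row_exclusively(Relation rel, ItemPointer tid)
{
	HeapTupleData tuple;
	Buffer		buffer;
	TM_FailureData tmfd;
	TM_Result	res;

	tuple.t_self = *tid;
	res = heap_lock_tuple(rel, &tuple,
						  GetCurrentCommandId(true),
						  LockTupleExclusive, LockWaitBlock,
						  true /* follow_updates */ ,
						  &buffer, &tmfd);

	ReleaseBuffer(buffer);		/* pinned, but not locked, on return */

	return (res == TM_Ok);
}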
5263 :
5264 : /*
5265 : * Acquire heavyweight lock on the given tuple, in preparation for acquiring
5266 : * its normal, Xmax-based tuple lock.
5267 : *
5268 : * have_tuple_lock is an input and output parameter: on input, it indicates
5269 : * whether the lock has previously been acquired (and this function does
5270 : * nothing in that case). If this function returns success, have_tuple_lock
5271 : * has been flipped to true.
5272 : *
5273 : * Returns false if it was unable to obtain the lock; this can only happen if
5274 : * wait_policy is Skip.
5275 : */
5276 : static bool
5277 604 : heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
5278 : LockWaitPolicy wait_policy, bool *have_tuple_lock)
5279 : {
5280 604 : if (*have_tuple_lock)
5281 18 : return true;
5282 :
5283 586 : switch (wait_policy)
5284 : {
5285 504 : case LockWaitBlock:
5286 504 : LockTupleTuplock(relation, tid, mode);
5287 504 : break;
5288 :
5289 68 : case LockWaitSkip:
5290 68 : if (!ConditionalLockTupleTuplock(relation, tid, mode, false))
5291 2 : return false;
5292 66 : break;
5293 :
5294 14 : case LockWaitError:
5295 14 : if (!ConditionalLockTupleTuplock(relation, tid, mode, log_lock_failures))
5296 2 : ereport(ERROR,
5297 : (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5298 : errmsg("could not obtain lock on row in relation \"%s\"",
5299 : RelationGetRelationName(relation))));
5300 12 : break;
5301 : }
5302 582 : *have_tuple_lock = true;
5303 :
5304 582 : return true;
5305 : }
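/*
 * [Editorial sketch, not part of heapam.c] The have_tuple_lock flag is the
 * caller's state: initialize it to false, let heap_acquire_tuplock() flip it
 * on the first successful call (later calls become no-ops), and release the
 * heavyweight lock yourself once the tuple work is done.  The helper name is
 * hypothetical.
 */
static void
example_tuplock_protocol(Relation rel, ItemPointer tid)
{
	bool		have_tuple_lock = false;

	if (!heap_acquire_tuplock(rel, tid, LockTupleExclusive,
							  LockWaitSkip, &have_tuple_lock))
		return;					/* unobtainable under a SKIP policy */

	/* a repeated call does nothing while have_tuple_lock is already true */
	(void) heap_acquire_tuplock(rel, tid, LockTupleExclusive,
								LockWaitSkip, &have_tuple_lock);

	if (have_tuple_lock)
		UnlockTupleTuplock(rel, tid, LockTupleExclusive);
}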
5306 :
5307 : /*
5308 : * Given an original set of Xmax and infomask, and a transaction (identified by
5309 : * add_to_xmax) acquiring a new lock of some mode, compute the new Xmax and
5310 : * corresponding infomasks to use on the tuple.
5311 : *
5312 : * Note that this might have side effects such as creating a new MultiXactId.
5313 : *
5314 : * Most callers will have called HeapTupleSatisfiesUpdate before this function;
5315 : * that will have set the HEAP_XMAX_INVALID bit if the xmax was a MultiXactId
5316 : * but it was not running anymore. There is a race condition, which is that the
5317 : * MultiXactId may have finished since then, but that uncommon case is handled
5318 : * either here, or within MultiXactIdExpand.
5319 : *
5320 : * There is a similar race condition possible when the old xmax was a regular
5321 : * TransactionId. We test TransactionIdIsInProgress again just to narrow the
5322 : * window, but it's still possible to end up creating an unnecessary
5323 : * MultiXactId. Fortunately this is harmless.
5324 : */
5325 : static void
5326 4061458 : compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
5327 : uint16 old_infomask2, TransactionId add_to_xmax,
5328 : LockTupleMode mode, bool is_update,
5329 : TransactionId *result_xmax, uint16 *result_infomask,
5330 : uint16 *result_infomask2)
5331 : {
5332 : TransactionId new_xmax;
5333 : uint16 new_infomask,
5334 : new_infomask2;
5335 :
5336 : Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
5337 :
5338 207992 : l5:
5339 4269450 : new_infomask = 0;
5340 4269450 : new_infomask2 = 0;
5341 4269450 : if (old_infomask & HEAP_XMAX_INVALID)
5342 : {
5343 : /*
5344 : * No previous locker; we just insert our own TransactionId.
5345 : *
5346 : * Note that it's critical that this case be the first one checked,
5347 : * because there are several blocks below that come back to this one
5348 : * to implement certain optimizations; old_infomask might contain
5349 : * other dirty bits in those cases, but we don't really care.
5350 : */
5351 4059190 : if (is_update)
5352 : {
5353 3594860 : new_xmax = add_to_xmax;
5354 3594860 : if (mode == LockTupleExclusive)
5355 3058050 : new_infomask2 |= HEAP_KEYS_UPDATED;
5356 : }
5357 : else
5358 : {
5359 464330 : new_infomask |= HEAP_XMAX_LOCK_ONLY;
5360 464330 : switch (mode)
5361 : {
5362 5114 : case LockTupleKeyShare:
5363 5114 : new_xmax = add_to_xmax;
5364 5114 : new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
5365 5114 : break;
5366 1472 : case LockTupleShare:
5367 1472 : new_xmax = add_to_xmax;
5368 1472 : new_infomask |= HEAP_XMAX_SHR_LOCK;
5369 1472 : break;
5370 266312 : case LockTupleNoKeyExclusive:
5371 266312 : new_xmax = add_to_xmax;
5372 266312 : new_infomask |= HEAP_XMAX_EXCL_LOCK;
5373 266312 : break;
5374 191432 : case LockTupleExclusive:
5375 191432 : new_xmax = add_to_xmax;
5376 191432 : new_infomask |= HEAP_XMAX_EXCL_LOCK;
5377 191432 : new_infomask2 |= HEAP_KEYS_UPDATED;
5378 191432 : break;
5379 0 : default:
5380 0 : new_xmax = InvalidTransactionId; /* silence compiler */
5381 0 : elog(ERROR, "invalid lock mode");
5382 : }
5383 : }
5384 : }
5385 210260 : else if (old_infomask & HEAP_XMAX_IS_MULTI)
5386 : {
5387 : MultiXactStatus new_status;
5388 :
5389 : /*
5390 : * Currently we don't allow XMAX_COMMITTED to be set for multis, so
5391 : * cross-check.
5392 : */
5393 : Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
5394 :
5395 : /*
5396 : * A multixact together with LOCK_ONLY set but neither lock bit set
5397 : * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
5398 : * anymore. This check is critical for databases upgraded by
5399 : * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
5400 : * that such multis are never passed.
5401 : */
5402 242 : if (HEAP_LOCKED_UPGRADED(old_infomask))
5403 : {
5404 0 : old_infomask &= ~HEAP_XMAX_IS_MULTI;
5405 0 : old_infomask |= HEAP_XMAX_INVALID;
5406 0 : goto l5;
5407 : }
5408 :
5409 : /*
5410 : * If the XMAX is already a MultiXactId, then we need to expand it to
5411 : * include add_to_xmax; but if all the members were lockers and are
5412 : * all gone, we can do away with the IS_MULTI bit and just set
5413 : * add_to_xmax as the only locker/updater. If all lockers are gone
5414 : * and we have an updater that aborted, we can also do without a
5415 : * multi.
5416 : *
5417 : * The cost of doing GetMultiXactIdMembers would be paid by
5418 : * MultiXactIdExpand if we weren't to do this, so this check is not
5419 : * incurring extra work anyhow.
5420 : */
5421 242 : if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
5422 : {
5423 46 : if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
5424 16 : !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
5425 : old_infomask)))
5426 : {
5427 : /*
5428 : * Reset these bits and restart; otherwise fall through to
5429 : * create a new multi below.
5430 : */
5431 46 : old_infomask &= ~HEAP_XMAX_IS_MULTI;
5432 46 : old_infomask |= HEAP_XMAX_INVALID;
5433 46 : goto l5;
5434 : }
5435 : }
5436 :
5437 196 : new_status = get_mxact_status_for_lock(mode, is_update);
5438 :
5439 196 : new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
5440 : new_status);
5441 196 : GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5442 : }
5443 210018 : else if (old_infomask & HEAP_XMAX_COMMITTED)
5444 : {
5445 : /*
5446 : * It's a committed update, so we need to preserve it as the updater
5447 : * of the tuple.
5448 : */
5449 : MultiXactStatus status;
5450 : MultiXactStatus new_status;
5451 :
5452 26 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5453 0 : status = MultiXactStatusUpdate;
5454 : else
5455 26 : status = MultiXactStatusNoKeyUpdate;
5456 :
5457 26 : new_status = get_mxact_status_for_lock(mode, is_update);
5458 :
5459 : /*
5460 : * since it's not running, it's obviously impossible for the old
5461 : * updater to be identical to the current one, so we need not check
5462 : * for that case as we do in the block above.
5463 : */
5464 26 : new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5465 26 : GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5466 : }
5467 209992 : else if (TransactionIdIsInProgress(xmax))
5468 : {
5469 : /*
5470 : * If the XMAX is a valid, in-progress TransactionId, then we need to
5471 : * create a new MultiXactId that includes both the old locker or
5472 : * updater and our own TransactionId.
5473 : */
5474 : MultiXactStatus new_status;
5475 : MultiXactStatus old_status;
5476 : LockTupleMode old_mode;
5477 :
5478 209974 : if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5479 : {
5480 209922 : if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5481 11248 : old_status = MultiXactStatusForKeyShare;
5482 198674 : else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5483 864 : old_status = MultiXactStatusForShare;
5484 197810 : else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5485 : {
5486 197810 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5487 185572 : old_status = MultiXactStatusForUpdate;
5488 : else
5489 12238 : old_status = MultiXactStatusForNoKeyUpdate;
5490 : }
5491 : else
5492 : {
5493 : /*
5494 : * LOCK_ONLY can be present alone only when a page has been
5495 : * upgraded by pg_upgrade. But in that case,
5496 : * TransactionIdIsInProgress() should have returned false. We
5497 : * assume it's no longer locked in this case.
5498 : */
5499 0 : elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
5500 0 : old_infomask |= HEAP_XMAX_INVALID;
5501 0 : old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
5502 0 : goto l5;
5503 : }
5504 : }
5505 : else
5506 : {
5507 : /* it's an update, but which kind? */
5508 52 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5509 0 : old_status = MultiXactStatusUpdate;
5510 : else
5511 52 : old_status = MultiXactStatusNoKeyUpdate;
5512 : }
5513 :
5514 209974 : old_mode = TUPLOCK_from_mxstatus(old_status);
5515 :
5516 : /*
5517 : * If the lock to be acquired is for the same TransactionId as the
5518 : * existing lock, there's an optimization possible: consider only the
5519 : * strongest of both locks as the only one present, and restart.
5520 : */
5521 209974 : if (xmax == add_to_xmax)
5522 : {
5523 : /*
5524 : * Note that it's not possible for the original tuple to be
5525 : * updated: we wouldn't be here because the tuple would have been
5526 : * invisible and we wouldn't try to update it. As a subtlety,
5527 : * this code can also run when traversing an update chain to lock
5528 : * future versions of a tuple. But we wouldn't be here either,
5529 : * because the add_to_xmax would be different from the original
5530 : * updater.
5531 : */
5532 : Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5533 :
5534 : /* acquire the strongest of both */
5535 207930 : if (mode < old_mode)
5536 104452 : mode = old_mode;
5537 : /* mustn't touch is_update */
5538 :
5539 207930 : old_infomask |= HEAP_XMAX_INVALID;
5540 207930 : goto l5;
5541 : }
5542 :
5543 : /* otherwise, just fall back to creating a new multixact */
5544 2044 : new_status = get_mxact_status_for_lock(mode, is_update);
5545 2044 : new_xmax = MultiXactIdCreate(xmax, old_status,
5546 : add_to_xmax, new_status);
5547 2044 : GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5548 : }
5549 28 : else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
5550 10 : TransactionIdDidCommit(xmax))
5551 2 : {
5552 : /*
5553 : * It's a committed update, so we must preserve it as the updater
5554 : * of the tuple.
5555 : */
5556 : MultiXactStatus status;
5557 : MultiXactStatus new_status;
5558 :
5559 2 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5560 0 : status = MultiXactStatusUpdate;
5561 : else
5562 2 : status = MultiXactStatusNoKeyUpdate;
5563 :
5564 2 : new_status = get_mxact_status_for_lock(mode, is_update);
5565 :
5566 : /*
5567 : * since it's not running, it's obviously impossible for the old
5568 : * updater to be identical to the current one, so we need not check
5569 : * for that case as we do in the block above.
5570 : */
5571 2 : new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5572 2 : GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5573 : }
5574 : else
5575 : {
5576 : /*
5577 : * Can get here iff the locking/updating transaction was running when
5578 : * the infomask was extracted from the tuple, but finished before
5579 : * TransactionIdIsInProgress got to run. Deal with it as if there was
5580 : * no locker at all in the first place.
5581 : */
5582 16 : old_infomask |= HEAP_XMAX_INVALID;
5583 16 : goto l5;
5584 : }
5585 :
5586 4061458 : *result_infomask = new_infomask;
5587 4061458 : *result_infomask2 = new_infomask2;
5588 4061458 : *result_xmax = new_xmax;
5589 4061458 : }
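/*
 * [Editorial sketch, not part of heapam.c] The simplest path above is the
 * HEAP_XMAX_INVALID one: with no previous locker, a plain share lock just
 * stores our own XID together with the lock-only and share-lock bits.  A
 * hypothetical check of that case:
 */
static void
example_fresh_share_lock(void)
{
	TransactionId xid = GetCurrentTransactionId();
	TransactionId new_xmax;
	uint16		new_infomask,
				new_infomask2;

	compute_new_xmax_infomask(InvalidTransactionId,
							  HEAP_XMAX_INVALID, 0,
							  xid, LockTupleShare, false /* is_update */ ,
							  &new_xmax, &new_infomask, &new_infomask2);

	Assert(new_xmax == xid);
	Assert(new_infomask == (HEAP_XMAX_LOCK_ONLY | HEAP_XMAX_SHR_LOCK));
	Assert(new_infomask2 == 0);
}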
5590 :
5591 : /*
5592 : * Subroutine for heap_lock_updated_tuple_rec.
5593 : *
5594 : * Given a hypothetical multixact status held by the transaction identified
5595 : * with the given xid, does the current transaction need to wait, fail, or can
5596 : * it continue if it wanted to acquire a lock of the given mode? "needwait"
5597 : * is set to true if waiting is necessary; if it can continue, then TM_Ok is
5598 : * returned. If the lock is already held by the current transaction, return
5599 : * TM_SelfModified. In case of a conflict with another transaction, a
5600 : * different HeapTupleSatisfiesUpdate return code is returned.
5601 : *
5602 : * The held status is said to be hypothetical because it might correspond to a
5603 : * lock held by a single Xid, i.e. not a real MultiXactId; we express it this
5604 : * way for simplicity of API.
5605 : */
5606 : static TM_Result
5607 64 : test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
5608 : LockTupleMode mode, HeapTuple tup,
5609 : bool *needwait)
5610 : {
5611 : MultiXactStatus wantedstatus;
5612 :
5613 64 : *needwait = false;
5614 64 : wantedstatus = get_mxact_status_for_lock(mode, false);
5615 :
5616 : /*
5617 : * Note: we *must* check TransactionIdIsInProgress before
5618 : * TransactionIdDidAbort/Commit; see comment at top of heapam_visibility.c
5619 : * for an explanation.
5620 : */
5621 64 : if (TransactionIdIsCurrentTransactionId(xid))
5622 : {
5623 : /*
5624 : * The tuple has already been locked by our own transaction. This is
5625 : * very rare but can happen if multiple transactions are trying to
5626 : * lock an ancient version of the same tuple.
5627 : */
5628 0 : return TM_SelfModified;
5629 : }
5630 64 : else if (TransactionIdIsInProgress(xid))
5631 : {
5632 : /*
5633 : * If the locking transaction is running, what we do depends on
5634 : * whether the lock modes conflict: if they do, then we must wait for
5635 : * it to finish; otherwise we can fall through to lock this tuple
5636 : * version without waiting.
5637 : */
5638 32 : if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5639 32 : LOCKMODE_from_mxstatus(wantedstatus)))
5640 : {
5641 16 : *needwait = true;
5642 : }
5643 :
5644 : /*
5645 : * If we set needwait above, then this value doesn't matter;
5646 : * otherwise, this value signals to caller that it's okay to proceed.
5647 : */
5648 32 : return TM_Ok;
5649 : }
5650 32 : else if (TransactionIdDidAbort(xid))
5651 6 : return TM_Ok;
5652 26 : else if (TransactionIdDidCommit(xid))
5653 : {
5654 : /*
5655 : * The other transaction committed. If it was only a locker, then the
5656 : * lock is completely gone now and we can return success; but if it
5657 : * was an update, then what we do depends on whether the two lock
5658 : * modes conflict. If they conflict, then we must report error to
5659 : * caller. But if they don't, we can fall through to allow the current
5660 : * transaction to lock the tuple.
5661 : *
5662 : * Note: the reason we worry about ISUPDATE here is that as soon as
5663 : * a transaction ends, all its locks are gone and meaningless, and
5664 : * thus we can ignore them; whereas its updates persist. In the
5665 : * TransactionIdIsInProgress case, above, we don't need to check
5666 : * because we know the lock is still "alive" and thus a conflict
5667 : * always needs to be checked.
5668 : */
5669 26 : if (!ISUPDATE_from_mxstatus(status))
5670 8 : return TM_Ok;
5671 :
5672 18 : if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5673 18 : LOCKMODE_from_mxstatus(wantedstatus)))
5674 : {
5675 : /* bummer */
5676 16 : if (!ItemPointerEquals(&tup->t_self, &tup->t_data->t_ctid))
5677 12 : return TM_Updated;
5678 : else
5679 4 : return TM_Deleted;
5680 : }
5681 :
5682 2 : return TM_Ok;
5683 : }
5684 :
5685 : /* Not in progress, not aborted, not committed -- must have crashed */
5686 0 : return TM_Ok;
5687 : }
5688 :
5689 :
5690 : /*
5691 : * Recursive part of heap_lock_updated_tuple
5692 : *
5693 : * Fetch the tuple pointed to by tid in rel, and mark it as locked by the given
5694 : * xid with the given mode; if this tuple is updated, recurse to lock the new
5695 : * version as well.
5696 : */
5697 : static TM_Result
5698 174 : heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
5699 : LockTupleMode mode)
5700 : {
5701 : TM_Result result;
5702 : ItemPointerData tupid;
5703 : HeapTupleData mytup;
5704 : Buffer buf;
5705 : uint16 new_infomask,
5706 : new_infomask2,
5707 : old_infomask,
5708 : old_infomask2;
5709 : TransactionId xmax,
5710 : new_xmax;
5711 174 : TransactionId priorXmax = InvalidTransactionId;
5712 174 : bool cleared_all_frozen = false;
5713 : bool pinned_desired_page;
5714 174 : Buffer vmbuffer = InvalidBuffer;
5715 : BlockNumber block;
5716 :
5717 174 : ItemPointerCopy(tid, &tupid);
5718 :
5719 : for (;;)
5720 : {
5721 180 : new_infomask = 0;
5722 180 : new_xmax = InvalidTransactionId;
5723 180 : block = ItemPointerGetBlockNumber(&tupid);
5724 180 : ItemPointerCopy(&tupid, &(mytup.t_self));
5725 :
5726 180 : if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false))
5727 : {
5728 : /*
5729 : * if we fail to find the updated version of the tuple, it's
5730 : * because it was vacuumed/pruned away after its creator
5731 : * transaction aborted. So behave as if we got to the end of the
5732 : * chain, and there's no further tuple to lock: return success to
5733 : * caller.
5734 : */
5735 0 : result = TM_Ok;
5736 0 : goto out_unlocked;
5737 : }
5738 :
5739 180 : l4:
5740 196 : CHECK_FOR_INTERRUPTS();
5741 :
5742 : /*
5743 : * Before locking the buffer, pin the visibility map page if it
5744 : * appears to be necessary. Since we haven't got the lock yet,
5745 : * someone else might be in the middle of changing this, so we'll need
5746 : * to recheck after we have the lock.
5747 : */
5748 196 : if (PageIsAllVisible(BufferGetPage(buf)))
5749 : {
5750 0 : visibilitymap_pin(rel, block, &vmbuffer);
5751 0 : pinned_desired_page = true;
5752 : }
5753 : else
5754 196 : pinned_desired_page = false;
5755 :
5756 196 : LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
5757 :
5758 : /*
5759 : * If we didn't pin the visibility map page and the page has become
5760 : * all visible while we were busy locking the buffer, we'll have to
5761 : * unlock and re-lock, to avoid holding the buffer lock across I/O.
5762 : * That's a bit unfortunate, but hopefully shouldn't happen often.
5763 : *
5764 : * Note: in some paths through this function, we will reach here
5765 : * holding a pin on a vm page that may or may not be the one matching
5766 : * this page. If this page isn't all-visible, we won't use the vm
5767 : * page, but we hold onto such a pin till the end of the function.
5768 : */
5769 196 : if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
5770 : {
5771 0 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5772 0 : visibilitymap_pin(rel, block, &vmbuffer);
5773 0 : LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
5774 : }
5775 :
5776 : /*
5777 : * Check the tuple XMIN against prior XMAX, if any. If we reached the
5778 : * end of the chain, we're done, so return success.
5779 : */
5780 202 : if (TransactionIdIsValid(priorXmax) &&
5781 6 : !TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data),
5782 : priorXmax))
5783 : {
5784 0 : result = TM_Ok;
5785 0 : goto out_locked;
5786 : }
5787 :
5788 : /*
5789 : * Also check Xmin: if this tuple was created by an aborted
5790 : * (sub)transaction, then we already locked the last live one in the
5791 : * chain, thus we're done, so return success.
5792 : */
5793 196 : if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
5794 : {
5795 26 : result = TM_Ok;
5796 26 : goto out_locked;
5797 : }
5798 :
5799 170 : old_infomask = mytup.t_data->t_infomask;
5800 170 : old_infomask2 = mytup.t_data->t_infomask2;
5801 170 : xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5802 :
5803 : /*
5804 : * If this tuple version has been updated or locked by some concurrent
5805 : * transaction(s), what we do depends on whether our lock mode
5806 : * conflicts with what those other transactions hold, and also on the
5807 : * status of them.
5808 : */
5809 170 : if (!(old_infomask & HEAP_XMAX_INVALID))
5810 : {
5811 : TransactionId rawxmax;
5812 : bool needwait;
5813 :
5814 60 : rawxmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5815 60 : if (old_infomask & HEAP_XMAX_IS_MULTI)
5816 : {
5817 : int nmembers;
5818 : int i;
5819 : MultiXactMember *members;
5820 :
5821 : /*
5822 : * We don't need a test for pg_upgrade'd tuples: this is only
5823 : * applied to tuples after the first in an update chain. Said
5824 : * first tuple in the chain may well be locked-in-9.2-and-
5825 : * pg_upgraded, but that one was already locked by our caller,
5826 : * not us; and any subsequent ones cannot be because our
5827 : * caller must necessarily have obtained a snapshot later than
5828 : * the pg_upgrade itself.
5829 : */
5830 : Assert(!HEAP_LOCKED_UPGRADED(mytup.t_data->t_infomask));
5831 :
5832 2 : nmembers = GetMultiXactIdMembers(rawxmax, &members, false,
5833 2 : HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5834 8 : for (i = 0; i < nmembers; i++)
5835 : {
5836 6 : result = test_lockmode_for_conflict(members[i].status,
5837 6 : members[i].xid,
5838 : mode,
5839 : &mytup,
5840 : &needwait);
5841 :
5842 : /*
5843 : * If the tuple was already locked by ourselves in a
5844 : * previous iteration of this (say heap_lock_tuple was
5845 : * forced to restart the locking loop because of a change
5846 : * in xmax), then we hold the lock already on this tuple
5847 : * version and we don't need to do anything; and this is
5848 : * not an error condition either. We just need to skip
5849 : * this tuple and continue locking the next version in the
5850 : * update chain.
5851 : */
5852 6 : if (result == TM_SelfModified)
5853 : {
5854 0 : pfree(members);
5855 0 : goto next;
5856 : }
5857 :
5858 6 : if (needwait)
5859 : {
5860 0 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5861 0 : XactLockTableWait(members[i].xid, rel,
5862 : &mytup.t_self,
5863 : XLTW_LockUpdated);
5864 0 : pfree(members);
5865 0 : goto l4;
5866 : }
5867 6 : if (result != TM_Ok)
5868 : {
5869 0 : pfree(members);
5870 0 : goto out_locked;
5871 : }
5872 : }
5873 2 : if (members)
5874 2 : pfree(members);
5875 : }
5876 : else
5877 : {
5878 : MultiXactStatus status;
5879 :
5880 : /*
5881 : * For a non-multi Xmax, we first need to compute the
5882 : * corresponding MultiXactStatus by using the infomask bits.
5883 : */
5884 58 : if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5885 : {
5886 20 : if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5887 20 : status = MultiXactStatusForKeyShare;
5888 0 : else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5889 0 : status = MultiXactStatusForShare;
5890 0 : else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5891 : {
5892 0 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5893 0 : status = MultiXactStatusForUpdate;
5894 : else
5895 0 : status = MultiXactStatusForNoKeyUpdate;
5896 : }
5897 : else
5898 : {
5899 : /*
5900 : * LOCK_ONLY present alone (a pg_upgraded tuple marked
5901 : * as share-locked in the old cluster) shouldn't be
5902 : * seen in the middle of an update chain.
5903 : */
5904 0 : elog(ERROR, "invalid lock status in tuple");
5905 : }
5906 : }
5907 : else
5908 : {
5909 : /* it's an update, but which kind? */
5910 38 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5911 28 : status = MultiXactStatusUpdate;
5912 : else
5913 10 : status = MultiXactStatusNoKeyUpdate;
5914 : }
5915 :
5916 58 : result = test_lockmode_for_conflict(status, rawxmax, mode,
5917 : &mytup, &needwait);
5918 :
5919 : /*
5920 : * If the tuple was already locked by ourselves in a previous
5921 : * iteration of this (say heap_lock_tuple was forced to
5922 : * restart the locking loop because of a change in xmax), then
5923 : * we hold the lock already on this tuple version and we don't
5924 : * need to do anything; and this is not an error condition
5925 : * either. We just need to skip this tuple and continue
5926 : * locking the next version in the update chain.
5927 : */
5928 58 : if (result == TM_SelfModified)
5929 0 : goto next;
5930 :
5931 58 : if (needwait)
5932 : {
5933 16 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5934 16 : XactLockTableWait(rawxmax, rel, &mytup.t_self,
5935 : XLTW_LockUpdated);
5936 16 : goto l4;
5937 : }
5938 42 : if (result != TM_Ok)
5939 : {
5940 16 : goto out_locked;
5941 : }
5942 : }
5943 : }
5944 :
5945 : /* compute the new Xmax and infomask values for the tuple ... */
5946 138 : compute_new_xmax_infomask(xmax, old_infomask, mytup.t_data->t_infomask2,
5947 : xid, mode, false,
5948 : &new_xmax, &new_infomask, &new_infomask2);
5949 :
5950 138 : if (PageIsAllVisible(BufferGetPage(buf)) &&
5951 0 : visibilitymap_clear(rel, block, vmbuffer,
5952 : VISIBILITYMAP_ALL_FROZEN))
5953 0 : cleared_all_frozen = true;
5954 :
5955 138 : START_CRIT_SECTION();
5956 :
5957 : /* ... and set them */
5958 138 : HeapTupleHeaderSetXmax(mytup.t_data, new_xmax);
5959 138 : mytup.t_data->t_infomask &= ~HEAP_XMAX_BITS;
5960 138 : mytup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5961 138 : mytup.t_data->t_infomask |= new_infomask;
5962 138 : mytup.t_data->t_infomask2 |= new_infomask2;
5963 :
5964 138 : MarkBufferDirty(buf);
5965 :
5966 : /* XLOG stuff */
5967 138 : if (RelationNeedsWAL(rel))
5968 : {
5969 : xl_heap_lock_updated xlrec;
5970 : XLogRecPtr recptr;
5971 138 : Page page = BufferGetPage(buf);
5972 :
5973 138 : XLogBeginInsert();
5974 138 : XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
5975 :
5976 138 : xlrec.offnum = ItemPointerGetOffsetNumber(&mytup.t_self);
5977 138 : xlrec.xmax = new_xmax;
5978 138 : xlrec.infobits_set = compute_infobits(new_infomask, new_infomask2);
5979 138 : xlrec.flags =
5980 138 : cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
5981 :
5982 138 : XLogRegisterData(&xlrec, SizeOfHeapLockUpdated);
5983 :
5984 138 : recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOCK_UPDATED);
5985 :
5986 138 : PageSetLSN(page, recptr);
5987 : }
5988 :
5989 138 : END_CRIT_SECTION();
5990 :
5991 138 : next:
5992 : /* if we find the end of update chain, we're done. */
5993 276 : if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
5994 276 : HeapTupleHeaderIndicatesMovedPartitions(mytup.t_data) ||
5995 146 : ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
5996 8 : HeapTupleHeaderIsOnlyLocked(mytup.t_data))
5997 : {
5998 132 : result = TM_Ok;
5999 132 : goto out_locked;
6000 : }
6001 :
6002 : /* tail recursion */
6003 6 : priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
6004 6 : ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
6005 6 : UnlockReleaseBuffer(buf);
6006 : }
6007 :
6008 : result = TM_Ok;
6009 :
6010 174 : out_locked:
6011 174 : UnlockReleaseBuffer(buf);
6012 :
6013 174 : out_unlocked:
6014 174 : if (vmbuffer != InvalidBuffer)
6015 0 : ReleaseBuffer(vmbuffer);
6016 :
6017 174 : return result;
6018 : }
6019 :
6020 : /*
6021 : * heap_lock_updated_tuple
6022 : * Follow update chain when locking an updated tuple, acquiring locks (row
6023 : * marks) on the updated versions.
6024 : *
6025 : * The initial tuple is assumed to be already locked.
6026 : *
6027 : * This function doesn't check visibility, it just unconditionally marks the
6028 : * tuple(s) as locked. If any tuple in the updated chain is being deleted
6029 : * concurrently (or updated with the key being modified), sleep until the
6030 : * transaction doing it is finished.
6031 : *
6032 : * Note that we don't acquire heavyweight tuple locks on the tuples we walk
6033 : * when we have to wait for other transactions to release them, as opposed to
6034 : * what heap_lock_tuple does. The reason is that having more than one
6035 : * transaction walking the chain is probably uncommon enough that risk of
6036 : * starvation is not likely: one of the preconditions for being here is that
6037 : * the snapshot in use predates the update that created this tuple (because we
6038 : * started at an earlier version of the tuple), but at the same time such a
6039 : * transaction cannot be using repeatable read or serializable isolation
6040 : * levels, because that would lead to a serializability failure.
6041 : */
6042 : static TM_Result
6043 204 : heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
6044 : TransactionId xid, LockTupleMode mode)
6045 : {
6046 : /*
6047 : * If the tuple has not been updated, or has moved into another partition
6048 : * (effectively a delete) stop here.
6049 : */
6050 204 : if (!HeapTupleHeaderIndicatesMovedPartitions(tuple->t_data) &&
6051 200 : !ItemPointerEquals(&tuple->t_self, ctid))
6052 : {
6053 : /*
6054 : * If this is the first possibly-multixact-able operation in the
6055 : * current transaction, set my per-backend OldestMemberMXactId
6056 : * setting. We can be certain that the transaction will never become a
6057 : * member of any older MultiXactIds than that. (We have to do this
6058 : * even if we end up just using our own TransactionId below, since
6059 : * some other backend could incorporate our XID into a MultiXact
6060 : * immediately afterwards.)
6061 : */
6062 174 : MultiXactIdSetOldestMember();
6063 :
6064 174 : return heap_lock_updated_tuple_rec(rel, ctid, xid, mode);
6065 : }
6066 :
6067 : /* nothing to lock */
6068 30 : return TM_Ok;
6069 : }
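
/*
 * Illustrative sketch only: how a caller such as heap_lock_tuple() might hand
 * off to heap_lock_updated_tuple() once it has decided to follow the update
 * chain.  "follow_updates" and "infomask" stand in for the caller's own local
 * state; this helper is not part of heapam and is shown purely for
 * orientation.
 */
static TM_Result
example_follow_update_chain(Relation relation, HeapTuple tuple,
                            ItemPointer ctid, LockTupleMode mode,
                            bool follow_updates, uint16 infomask)
{
    /* Only updater xmaxes leave behind newer versions worth locking. */
    if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
        return heap_lock_updated_tuple(relation, tuple, ctid,
                                       GetCurrentTransactionId(), mode);

    return TM_Ok;
}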
6070 :
6071 : /*
6072 : * heap_finish_speculative - mark speculative insertion as successful
6073 : *
6074 : * To successfully finish a speculative insertion we have to clear speculative
6075 : * token from tuple. To do so the t_ctid field, which will contain a
6076 : * speculative token value, is modified in place to point to the tuple itself,
6077 : * which is characteristic of a newly inserted ordinary tuple.
6078 : *
6079 : * NB: It is not ok to commit without either finishing or aborting a
6080 : * speculative insertion. We could treat speculative tuples of committed
6081 : * transactions implicitly as completed, but then we would have to be prepared
6082 : * to deal with speculative tokens on committed tuples. That wouldn't be
6083 : * difficult - no-one looks at the ctid field of a tuple with invalid xmax -
6084 : * but clearing the token at completion isn't very expensive either.
6085 : * An explicit confirmation WAL record also makes logical decoding simpler.
6086 : */
 : */
6087 : void
6088 4124 : heap_finish_speculative(Relation relation, ItemPointer tid)
6089 : {
6090 : Buffer buffer;
6091 : Page page;
6092 : OffsetNumber offnum;
6093 4124 : ItemId lp = NULL;
6094 : HeapTupleHeader htup;
6095 :
6096 4124 : buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
6097 4124 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6098 4124 : page = BufferGetPage(buffer);
6099 :
6100 4124 : offnum = ItemPointerGetOffsetNumber(tid);
6101 4124 : if (PageGetMaxOffsetNumber(page) >= offnum)
6102 4124 : lp = PageGetItemId(page, offnum);
6103 :
6104 4124 : if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6105 0 : elog(ERROR, "invalid lp");
6106 :
6107 4124 : htup = (HeapTupleHeader) PageGetItem(page, lp);
6108 :
6109 : /* NO EREPORT(ERROR) from here till changes are logged */
6110 4124 : START_CRIT_SECTION();
6111 :
6112 : Assert(HeapTupleHeaderIsSpeculative(htup));
6113 :
6114 4124 : MarkBufferDirty(buffer);
6115 :
6116 : /*
6117 : * Replace the speculative insertion token with a real t_ctid, pointing to
6118 : * itself like it does on regular tuples.
6119 : */
6120 4124 : htup->t_ctid = *tid;
6121 :
6122 : /* XLOG stuff */
6123 4124 : if (RelationNeedsWAL(relation))
6124 : {
6125 : xl_heap_confirm xlrec;
6126 : XLogRecPtr recptr;
6127 :
6128 4106 : xlrec.offnum = ItemPointerGetOffsetNumber(tid);
6129 :
6130 4106 : XLogBeginInsert();
6131 :
6132 : /* We want the same filtering on this as on a plain insert */
6133 4106 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
6134 :
6135 4106 : XLogRegisterData(&xlrec, SizeOfHeapConfirm);
6136 4106 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6137 :
6138 4106 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
6139 :
6140 4106 : PageSetLSN(page, recptr);
6141 : }
6142 :
6143 4124 : END_CRIT_SECTION();
6144 :
6145 4124 : UnlockReleaseBuffer(buffer);
6146 4124 : }
6147 :
6148 : /*
6149 : * heap_abort_speculative - kill a speculatively inserted tuple
6150 : *
6151 : * Marks a tuple that was speculatively inserted in the same command as dead,
6152 : * by setting its xmin as invalid. That makes it immediately appear as dead
6153 : * to all transactions, including our own. In particular, it makes
6154 : * HeapTupleSatisfiesDirty() regard the tuple as dead, so that another backend
6155 : * inserting a duplicate key value won't unnecessarily wait for our whole
6156 : * transaction to finish (it'll just wait for our speculative insertion to
6157 : * finish).
6158 : *
6159 : * Killing the tuple prevents "unprincipled deadlocks", which are deadlocks
6160 : * that arise due to a mutual dependency that is not user visible. By
6161 : * definition, unprincipled deadlocks cannot be prevented by the user
6162 : * reordering lock acquisition in client code, because the implementation level
6163 : * lock acquisitions are not under the user's direct control. If speculative
6164 : * inserters did not take this precaution, then under high concurrency they
6165 : * could deadlock with each other, which would not be acceptable.
6166 : *
6167 : * This is somewhat redundant with heap_delete, but we prefer to have a
6168 : * dedicated routine with stripped down requirements. Note that this is also
6169 : * used to delete the TOAST tuples created during speculative insertion.
6170 : *
6171 : * This routine does not affect logical decoding as it only looks at
6172 : * confirmation records.
6173 : */
6174 : void
6175 20 : heap_abort_speculative(Relation relation, ItemPointer tid)
6176 : {
6177 20 : TransactionId xid = GetCurrentTransactionId();
6178 : ItemId lp;
6179 : HeapTupleData tp;
6180 : Page page;
6181 : BlockNumber block;
6182 : Buffer buffer;
6183 :
6184 : Assert(ItemPointerIsValid(tid));
6185 :
6186 20 : block = ItemPointerGetBlockNumber(tid);
6187 20 : buffer = ReadBuffer(relation, block);
6188 20 : page = BufferGetPage(buffer);
6189 :
6190 20 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6191 :
6192 : /*
6193 : * Page can't be all visible, we just inserted into it, and are still
6194 : * running.
6195 : */
6196 : Assert(!PageIsAllVisible(page));
6197 :
6198 20 : lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
6199 : Assert(ItemIdIsNormal(lp));
6200 :
6201 20 : tp.t_tableOid = RelationGetRelid(relation);
6202 20 : tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
6203 20 : tp.t_len = ItemIdGetLength(lp);
6204 20 : tp.t_self = *tid;
6205 :
6206 : /*
6207 : * Sanity check that the tuple really is a speculatively inserted tuple,
6208 : * inserted by us.
6209 : */
6210 20 : if (tp.t_data->t_choice.t_heap.t_xmin != xid)
6211 0 : elog(ERROR, "attempted to kill a tuple inserted by another transaction");
6212 20 : if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
6213 0 : elog(ERROR, "attempted to kill a non-speculative tuple");
6214 : Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
6215 :
6216 : /*
6217 : * No need to check for serializable conflicts here. There is never a
6218 : * need for a combo CID, either. No need to extract replica identity, or
6219 : * do anything special with infomask bits.
6220 : */
6221 :
6222 20 : START_CRIT_SECTION();
6223 :
6224 : /*
6225 : * The tuple will become DEAD immediately. Flag that this page is a
6226 : * candidate for pruning by setting xmin to TransactionXmin. While not
6227 : * immediately prunable, it is the oldest xid we can cheaply determine
6228 : * that's safe against wraparound / being older than the table's
6229 : * relfrozenxid. To defend against the unlikely case of a new relation
6230 : * having a newer relfrozenxid than our TransactionXmin, use relfrozenxid
6231 : * if so (vacuum can't subsequently move relfrozenxid to beyond
6232 : * TransactionXmin, so there's no race here).
6233 : */
6234 : Assert(TransactionIdIsValid(TransactionXmin));
6235 : {
6236 20 : TransactionId relfrozenxid = relation->rd_rel->relfrozenxid;
6237 : TransactionId prune_xid;
6238 :
6239 20 : if (TransactionIdPrecedes(TransactionXmin, relfrozenxid))
6240 0 : prune_xid = relfrozenxid;
6241 : else
6242 20 : prune_xid = TransactionXmin;
6243 20 : PageSetPrunable(page, prune_xid);
6244 : }
6245 :
6246 : /* store transaction information of xact deleting the tuple */
6247 20 : tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
6248 20 : tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
6249 :
6250 : /*
6251 : * Set the tuple header xmin to InvalidTransactionId. This makes the
6252 : * tuple immediately invisible everyone. (In particular, to any
6253 : * transactions waiting on the speculative token, woken up later.)
6254 : */
6255 20 : HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
6256 :
6257 : /* Clear the speculative insertion token too */
6258 20 : tp.t_data->t_ctid = tp.t_self;
6259 :
6260 20 : MarkBufferDirty(buffer);
6261 :
6262 : /*
6263 : * XLOG stuff
6264 : *
6265 : * The WAL records generated here match heap_delete(). The same recovery
6266 : * routines are used.
6267 : */
6268 20 : if (RelationNeedsWAL(relation))
6269 : {
6270 : xl_heap_delete xlrec;
6271 : XLogRecPtr recptr;
6272 :
6273 20 : xlrec.flags = XLH_DELETE_IS_SUPER;
6274 40 : xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
6275 20 : tp.t_data->t_infomask2);
6276 20 : xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
6277 20 : xlrec.xmax = xid;
6278 :
6279 20 : XLogBeginInsert();
6280 20 : XLogRegisterData(&xlrec, SizeOfHeapDelete);
6281 20 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6282 :
6283 : /* No replica identity & replication origin logged */
6284 :
6285 20 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
6286 :
6287 20 : PageSetLSN(page, recptr);
6288 : }
6289 :
6290 20 : END_CRIT_SECTION();
6291 :
6292 20 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6293 :
6294 20 : if (HeapTupleHasExternal(&tp))
6295 : {
6296 : Assert(!IsToastRelation(relation));
6297 2 : heap_toast_delete(relation, &tp, true);
6298 : }
6299 :
6300 : /*
6301 : * Never need to mark tuple for invalidation, since catalogs don't support
6302 : * speculative insertion
6303 : */
6304 :
6305 : /* Now we can release the buffer */
6306 20 : ReleaseBuffer(buffer);
6307 :
6308 : /* count deletion, as we counted the insertion too */
6309 20 : pgstat_count_heap_delete(relation);
6310 20 : }
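
/*
 * Illustrative sketch only, loosely modeled on the table AM layer
 * (heapam_handler.c) rather than copied from it: the intended calling pattern
 * for speculative insertion.  The token would normally come from
 * SpeculativeInsertionLockAcquire(); "conflict" stands in for the outcome of
 * the unique-index probe.
 */
static void
example_speculative_insert(Relation rel, HeapTuple tup, CommandId cid,
                           uint32 spec_token, bool conflict)
{
    /* Stamp the token into t_ctid, then insert the tuple speculatively. */
    HeapTupleHeaderSetSpeculativeToken(tup->t_data, spec_token);
    heap_insert(rel, tup, cid, HEAP_INSERT_SPECULATIVE, NULL);

    /* ... the unique-index probe runs here; rivals wait on spec_token ... */

    if (!conflict)
        heap_finish_speculative(rel, &tup->t_self); /* clear the token */
    else
        heap_abort_speculative(rel, &tup->t_self);  /* mark the tuple dead */
}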
6311 :
6312 : /*
6313 : * heap_inplace_lock - protect inplace update from concurrent heap_update()
6314 : *
6315 : * Evaluate whether the tuple's state is compatible with a no-key update.
6316 : * Current transaction rowmarks are fine, as is KEY SHARE from any
6317 : * transaction. If compatible, return true with the buffer exclusive-locked,
6318 : * and the caller must release that by calling
6319 : * heap_inplace_update_and_unlock(), calling heap_inplace_unlock(), or raising
6320 : * an error. Otherwise, call release_callback(arg), wait for blocking
6321 : * transactions to end, and return false.
6322 : *
6323 : * Since this is intended for system catalogs and SERIALIZABLE doesn't cover
6324 : * DDL, this doesn't guarantee any particular predicate locking.
6325 : *
6326 : * One could modify this to return true for tuples with delete in progress,
6327 : * All inplace updaters take a lock that conflicts with DROP. If explicit
6328 : * "DELETE FROM pg_class" is in progress, we'll wait for it like we would an
6329 : * update.
6330 : *
6331 : * Readers of inplace-updated fields expect changes to those fields are
6332 : * durable. For example, vac_truncate_clog() reads datfrozenxid from
6333 : * pg_database tuples via catalog snapshots. A future snapshot must not
6334 : * return a lower datfrozenxid for the same database OID (lower in the
6335 : * FullTransactionIdPrecedes() sense). We achieve that since no update of a
6336 : * tuple can start while we hold a lock on its buffer. In cases like
6337 : * BEGIN;GRANT;CREATE INDEX;COMMIT we're inplace-updating a tuple visible only
6338 : * to this transaction. ROLLBACK then is one case where it's okay to lose
6339 : * inplace updates. (Restoring relhasindex=false on ROLLBACK is fine, since
6340 : * any concurrent CREATE INDEX would have blocked, then inplace-updated the
6341 : * committed tuple.)
6342 : *
6343 : * In principle, we could avoid waiting by overwriting every tuple in the
6344 : * updated tuple chain. Reader expectations permit updating a tuple only if
6345 : * it's aborted, is the tail of the chain, or we already updated the tuple
6346 : * referenced in its t_ctid. Hence, we would need to overwrite the tuples in
6347 : * order from tail to head. That would imply either (a) mutating all tuples
6348 : * in one critical section or (b) accepting a chance of partial completion.
6349 : * Partial completion of a relfrozenxid update would have the weird
6350 : * consequence that the table's next VACUUM could see the table's relfrozenxid
6351 : * move forward between vacuum_get_cutoffs() and finishing.
6352 : */
6353 : bool
6354 409048 : heap_inplace_lock(Relation relation,
6355 : HeapTuple oldtup_ptr, Buffer buffer,
6356 : void (*release_callback) (void *), void *arg)
6357 : {
6358 409048 : HeapTupleData oldtup = *oldtup_ptr; /* minimize diff vs. heap_update() */
6359 : TM_Result result;
6360 : bool ret;
6361 :
6362 : #ifdef USE_ASSERT_CHECKING
6363 : if (RelationGetRelid(relation) == RelationRelationId)
6364 : check_inplace_rel_lock(oldtup_ptr);
6365 : #endif
6366 :
6367 : Assert(BufferIsValid(buffer));
6368 :
6369 : /*
6370 : * Construct shared cache inval if necessary. Because we pass a tuple
6371 : * version without our own inplace changes or inplace changes other
6372 : * sessions complete while we wait for locks, inplace update mustn't
6373 : * change catcache lookup keys. But we aren't bothering with index
6374 : * updates either, so that's true a fortiori. After LockBuffer(), it
6375 : * would be too late, because this might reach a
6376 : * CatalogCacheInitializeCache() that locks "buffer".
6377 : */
6378 409048 : CacheInvalidateHeapTupleInplace(relation, oldtup_ptr, NULL);
6379 :
6380 409048 : LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
6381 409048 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6382 :
6383 : /*----------
6384 : * Interpret HeapTupleSatisfiesUpdate() like heap_update() does, except:
6385 : *
6386 : * - wait unconditionally
6387 : * - already locked tuple above, since inplace needs that unconditionally
6388 : * - don't recheck header after wait: simpler to defer to next iteration
6389 : * - don't try to continue even if the updater aborts: likewise
6390 : * - no crosscheck
6391 : */
6392 409048 : result = HeapTupleSatisfiesUpdate(&oldtup, GetCurrentCommandId(false),
6393 : buffer);
6394 :
6395 409048 : if (result == TM_Invisible)
6396 : {
6397 : /* no known way this can happen */
6398 0 : ereport(ERROR,
6399 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
6400 : errmsg_internal("attempted to overwrite invisible tuple")));
6401 : }
6402 409048 : else if (result == TM_SelfModified)
6403 : {
6404 : /*
6405 : * CREATE INDEX might reach this if an expression is silly enough to
6406 : * call e.g. SELECT ... FROM pg_class FOR SHARE. C code of other SQL
6407 : * statements might get here after a heap_update() of the same row, in
6408 : * the absence of an intervening CommandCounterIncrement().
6409 : */
6410 0 : ereport(ERROR,
6411 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
6412 : errmsg("tuple to be updated was already modified by an operation triggered by the current command")));
6413 : }
6414 409048 : else if (result == TM_BeingModified)
6415 : {
6416 : TransactionId xwait;
6417 : uint16 infomask;
6418 :
6419 82 : xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
6420 82 : infomask = oldtup.t_data->t_infomask;
6421 :
6422 82 : if (infomask & HEAP_XMAX_IS_MULTI)
6423 : {
6424 10 : LockTupleMode lockmode = LockTupleNoKeyExclusive;
6425 10 : MultiXactStatus mxact_status = MultiXactStatusNoKeyUpdate;
6426 : int remain;
6427 :
6428 10 : if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
6429 : lockmode, NULL))
6430 : {
6431 4 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6432 4 : release_callback(arg);
6433 4 : ret = false;
6434 4 : MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
6435 : relation, &oldtup.t_self, XLTW_Update,
6436 : &remain);
6437 : }
6438 : else
6439 6 : ret = true;
6440 : }
6441 72 : else if (TransactionIdIsCurrentTransactionId(xwait))
6442 2 : ret = true;
6443 70 : else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
6444 2 : ret = true;
6445 : else
6446 : {
6447 68 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6448 68 : release_callback(arg);
6449 68 : ret = false;
6450 68 : XactLockTableWait(xwait, relation, &oldtup.t_self,
6451 : XLTW_Update);
6452 : }
6453 : }
6454 : else
6455 : {
6456 408966 : ret = (result == TM_Ok);
6457 408966 : if (!ret)
6458 : {
6459 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6460 0 : release_callback(arg);
6461 : }
6462 : }
6463 :
6464 : /*
6465 : * GetCatalogSnapshot() relies on invalidation messages to know when to
6466 : * take a new snapshot. COMMIT of xwait is responsible for sending the
6467 : * invalidation. We're not acquiring heavyweight locks sufficient to
6468 : * block if not yet sent, so we must take a new snapshot to ensure a later
6469 : * attempt has a fair chance. While we don't need this if xwait aborted,
6470 : * don't bother optimizing that.
6471 : */
6472 409048 : if (!ret)
6473 : {
6474 72 : UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
6475 72 : ForgetInplace_Inval();
6476 72 : InvalidateCatalogSnapshot();
6477 : }
6478 409048 : return ret;
6479 : }
6480 :
6481 : /*
6482 : * heap_inplace_update_and_unlock - core of systable_inplace_update_finish
6483 : *
6484 : * The tuple cannot change size, and therefore its header fields and null
6485 : * bitmap (if any) don't change either.
6486 : *
6487 : * Since we hold LOCKTAG_TUPLE, no updater has a local copy of this tuple.
6488 : */
6489 : void
6490 161474 : heap_inplace_update_and_unlock(Relation relation,
6491 : HeapTuple oldtup, HeapTuple tuple,
6492 : Buffer buffer)
6493 : {
6494 161474 : HeapTupleHeader htup = oldtup->t_data;
6495 : uint32 oldlen;
6496 : uint32 newlen;
6497 : char *dst;
6498 : char *src;
6499 161474 : int nmsgs = 0;
6500 161474 : SharedInvalidationMessage *invalMessages = NULL;
6501 161474 : bool RelcacheInitFileInval = false;
6502 :
6503 : Assert(ItemPointerEquals(&oldtup->t_self, &tuple->t_self));
6504 161474 : oldlen = oldtup->t_len - htup->t_hoff;
6505 161474 : newlen = tuple->t_len - tuple->t_data->t_hoff;
6506 161474 : if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
6507 0 : elog(ERROR, "wrong tuple length");
6508 :
6509 161474 : dst = (char *) htup + htup->t_hoff;
6510 161474 : src = (char *) tuple->t_data + tuple->t_data->t_hoff;
6511 :
6512 : /* Like RecordTransactionCommit(), log only if needed */
6513 161474 : if (XLogStandbyInfoActive())
6514 99516 : nmsgs = inplaceGetInvalidationMessages(&invalMessages,
6515 : &RelcacheInitFileInval);
6516 :
6517 : /*
6518 : * Unlink relcache init files as needed. If unlinking, acquire
6519 : * RelCacheInitLock until after associated invalidations. By doing this
6520 : * in advance, if we checkpoint and then crash between inplace
6521 : * XLogInsert() and inval, we don't rely on StartupXLOG() ->
6522 : * RelationCacheInitFileRemove(). That uses elevel==LOG, so replay would
6523 : * neglect to PANIC on EIO.
6524 : */
6525 161474 : PreInplace_Inval();
6526 :
6527 : /*----------
6528 : * NO EREPORT(ERROR) from here till changes are complete
6529 : *
6530 : * Our buffer lock won't stop a reader having already pinned and checked
6531 : * visibility for this tuple. Hence, we write WAL first, then mutate the
6532 : * buffer. Like in MarkBufferDirtyHint() or RecordTransactionCommit(),
6533 : * checkpoint delay makes that acceptable. With the usual order of
6534 : * changes, a crash after memcpy() and before XLogInsert() could allow
6535 : * datfrozenxid to overtake relfrozenxid:
6536 : *
6537 : * ["D" is a VACUUM (ONLY_DATABASE_STATS)]
6538 : * ["R" is a VACUUM tbl]
6539 : * D: vac_update_datfrozenxid() -> systable_beginscan(pg_class)
6540 : * D: systable_getnext() returns pg_class tuple of tbl
6541 : * R: memcpy() into pg_class tuple of tbl
6542 : * D: raise pg_database.datfrozenxid, XLogInsert(), finish
6543 : * [crash]
6544 : * [recovery restores datfrozenxid w/o relfrozenxid]
6545 : *
6546 : * Mimic MarkBufferDirtyHint() subroutine XLogSaveBufferForHint().
6547 : * Specifically, use DELAY_CHKPT_START, and copy the buffer to the stack.
6548 : * The stack copy facilitates a FPI of the post-mutation block before we
6549 : * accept other sessions seeing it. DELAY_CHKPT_START allows us to
6550 : * XLogInsert() before MarkBufferDirty(). Since XLogSaveBufferForHint()
6551 : * can operate under BUFFER_LOCK_SHARED, it can't avoid DELAY_CHKPT_START.
6552 : * This function, however, likely could avoid it with the following order
6553 : * of operations: MarkBufferDirty(), XLogInsert(), memcpy(). Opt to use
6554 : * DELAY_CHKPT_START here, too, as a way to have fewer distinct code
6555 : * patterns to analyze. Inplace update isn't so frequent that it should
6556 : * pursue the small optimization of skipping DELAY_CHKPT_START.
6557 : */
6558 : Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
6559 161474 : START_CRIT_SECTION();
6560 161474 : MyProc->delayChkptFlags |= DELAY_CHKPT_START;
6561 :
6562 : /* XLOG stuff */
6563 161474 : if (RelationNeedsWAL(relation))
6564 : {
6565 : xl_heap_inplace xlrec;
6566 : PGAlignedBlock copied_buffer;
6567 161458 : char *origdata = (char *) BufferGetBlock(buffer);
6568 161458 : Page page = BufferGetPage(buffer);
6569 161458 : uint16 lower = ((PageHeader) page)->pd_lower;
6570 161458 : uint16 upper = ((PageHeader) page)->pd_upper;
6571 : uintptr_t dst_offset_in_block;
6572 : RelFileLocator rlocator;
6573 : ForkNumber forkno;
6574 : BlockNumber blkno;
6575 : XLogRecPtr recptr;
6576 :
6577 161458 : xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6578 161458 : xlrec.dbId = MyDatabaseId;
6579 161458 : xlrec.tsId = MyDatabaseTableSpace;
6580 161458 : xlrec.relcacheInitFileInval = RelcacheInitFileInval;
6581 161458 : xlrec.nmsgs = nmsgs;
6582 :
6583 161458 : XLogBeginInsert();
6584 161458 : XLogRegisterData(&xlrec, MinSizeOfHeapInplace);
6585 161458 : if (nmsgs != 0)
6586 70216 : XLogRegisterData(invalMessages,
6587 : nmsgs * sizeof(SharedInvalidationMessage));
6588 :
6589 : /* register block matching what buffer will look like after changes */
6590 161458 : memcpy(copied_buffer.data, origdata, lower);
6591 161458 : memcpy(copied_buffer.data + upper, origdata + upper, BLCKSZ - upper);
6592 161458 : dst_offset_in_block = dst - origdata;
6593 161458 : memcpy(copied_buffer.data + dst_offset_in_block, src, newlen);
6594 161458 : BufferGetTag(buffer, &rlocator, &forkno, &blkno);
6595 : Assert(forkno == MAIN_FORKNUM);
6596 161458 : XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data,
6597 : REGBUF_STANDARD);
6598 161458 : XLogRegisterBufData(0, src, newlen);
6599 :
6600 : /* inplace updates aren't decoded atm, don't log the origin */
6601 :
6602 161458 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
6603 :
6604 161458 : PageSetLSN(page, recptr);
6605 : }
6606 :
6607 161474 : memcpy(dst, src, newlen);
6608 :
6609 161474 : MarkBufferDirty(buffer);
6610 :
6611 161474 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6612 :
6613 : /*
6614 : * Send invalidations to shared queue. SearchSysCacheLocked1() assumes we
6615 : * do this before UnlockTuple().
6616 : *
6617 : * If we're mutating a tuple visible only to this transaction, there's an
6618 : * equivalent transactional inval from the action that created the tuple,
6619 : * and this inval is superfluous.
6620 : */
6621 161474 : AtInplace_Inval();
6622 :
6623 161474 : MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
6624 161474 : END_CRIT_SECTION();
6625 161474 : UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
6626 :
6627 161474 : AcceptInvalidationMessages(); /* local processing of just-sent inval */
6628 :
6629 : /*
6630 : * Queue a transactional inval. The immediate invalidation we just sent
6631 : * is the only one known to be necessary. To reduce risk from the
6632 : * transition to immediate invalidation, continue sending a transactional
6633 : * invalidation like we've long done. Third-party code might rely on it.
6634 : */
6635 161474 : if (!IsBootstrapProcessingMode())
6636 132174 : CacheInvalidateHeapTuple(relation, tuple, NULL);
6637 161474 : }
6638 :
6639 : /*
6640 : * heap_inplace_unlock - reverse of heap_inplace_lock
6641 : */
6642 : void
6643 247502 : heap_inplace_unlock(Relation relation,
6644 : HeapTuple oldtup, Buffer buffer)
6645 : {
6646 247502 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6647 247502 : UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
6648 247502 : ForgetInplace_Inval();
6649 247502 : }
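
/*
 * Illustrative sketch only: the caller-side protocol for the three
 * heap_inplace_* routines above, loosely following what
 * systable_inplace_update_begin()/_finish()/_cancel() do for system catalogs.
 * "refetch" and "edit_copy" are hypothetical callbacks standing in for the
 * caller's catalog scan and its field mutation.
 */
static void
example_inplace_update(Relation rel,
                       HeapTuple (*refetch) (Relation, Buffer *),
                       bool (*edit_copy) (HeapTuple),
                       void (*release_cb) (void *), void *release_arg)
{
    Buffer      buffer;
    HeapTuple   oldtup;
    HeapTuple   newtup;

    for (;;)
    {
        /* pin the buffer and fetch the latest version of the tuple */
        oldtup = refetch(rel, &buffer);

        if (heap_inplace_lock(rel, oldtup, buffer, release_cb, release_arg))
            break;              /* buffer is now exclusive-locked */

        /* lock not taken: callback already ran and we waited; retry */
        ReleaseBuffer(buffer);
    }

    newtup = heap_copytuple(oldtup);
    if (edit_copy(newtup))      /* mutate fields; size must not change */
        heap_inplace_update_and_unlock(rel, oldtup, newtup, buffer);
    else
        heap_inplace_unlock(rel, oldtup, buffer);

    ReleaseBuffer(buffer);      /* both paths leave the pin to us */
}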
6650 :
6651 : #define FRM_NOOP 0x0001
6652 : #define FRM_INVALIDATE_XMAX 0x0002
6653 : #define FRM_RETURN_IS_XID 0x0004
6654 : #define FRM_RETURN_IS_MULTI 0x0008
6655 : #define FRM_MARK_COMMITTED 0x0010
6656 :
6657 : /*
6658 : * FreezeMultiXactId
6659 : * Determine what to do during freezing when a tuple is marked by a
6660 : * MultiXactId.
6661 : *
6662 : * "flags" is an output value; it's used to tell caller what to do on return.
6663 : * "pagefrz" is an input/output value, used to manage page level freezing.
6664 : *
6665 : * Possible values that we can set in "flags":
6666 : * FRM_NOOP
6667 : * don't do anything -- keep existing Xmax
6668 : * FRM_INVALIDATE_XMAX
6669 : * mark Xmax as InvalidTransactionId and set XMAX_INVALID flag.
6670 : * FRM_RETURN_IS_XID
6671 : * The Xid return value is a single update Xid to set as xmax.
6672 : * FRM_MARK_COMMITTED
6673 : * Xmax can be marked as HEAP_XMAX_COMMITTED
6674 : * FRM_RETURN_IS_MULTI
6675 : * The return value is a new MultiXactId to set as new Xmax.
6676 : * (caller must obtain proper infomask bits using GetMultiXactIdHintBits)
6677 : *
6678 : * Caller delegates control of page freezing to us. In practice we always
6679 : * force freezing of caller's page unless FRM_NOOP processing is indicated.
6680 : * We help caller ensure that XIDs < FreezeLimit and MXIDs < MultiXactCutoff
6681 : * can never be left behind. We freely choose when and how to process each
6682 : * Multi, without ever violating the cutoff postconditions for freezing.
6683 : *
6684 : * It's useful to remove Multis on a proactive timeline (relative to freezing
6685 : * XIDs) to keep MultiXact member SLRU buffer misses to a minimum. It can also
6686 : * be cheaper in the short run, for us, since we too can avoid SLRU buffer
6687 : * misses through eager processing.
6688 : *
6689 : * NB: Creates a _new_ MultiXactId when FRM_RETURN_IS_MULTI is set, though only
6690 : * when FreezeLimit and/or MultiXactCutoff cutoffs leave us with no choice.
6691 : * This can usually be put off, which is usually enough to avoid it altogether.
6692 : * Allocating new multis during VACUUM should be avoided on general principle;
6693 : * only VACUUM can advance relminmxid, so allocating new Multis here comes with
6694 : * its own special risks.
6695 : *
6696 : * NB: Caller must maintain "no freeze" NewRelfrozenXid/NewRelminMxid trackers
6697 : * using heap_tuple_should_freeze when we haven't forced page-level freezing.
6698 : *
6699 : * NB: Caller should avoid needlessly calling heap_tuple_should_freeze when we
6700 : * have already forced page-level freezing, since that might incur the same
6701 : * SLRU buffer misses that we specifically intended to avoid by freezing.
6702 : */
6703 : static TransactionId
6704 14 : FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
6705 : const struct VacuumCutoffs *cutoffs, uint16 *flags,
6706 : HeapPageFreeze *pagefrz)
6707 : {
6708 : TransactionId newxmax;
6709 : MultiXactMember *members;
6710 : int nmembers;
6711 : bool need_replace;
6712 : int nnewmembers;
6713 : MultiXactMember *newmembers;
6714 : bool has_lockers;
6715 : TransactionId update_xid;
6716 : bool update_committed;
6717 : TransactionId FreezePageRelfrozenXid;
6718 :
6719 14 : *flags = 0;
6720 :
6721 : /* We should only be called in Multis */
6722 : Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6723 :
6724 28 : if (!MultiXactIdIsValid(multi) ||
6725 14 : HEAP_LOCKED_UPGRADED(t_infomask))
6726 : {
6727 0 : *flags |= FRM_INVALIDATE_XMAX;
6728 0 : pagefrz->freeze_required = true;
6729 0 : return InvalidTransactionId;
6730 : }
6731 14 : else if (MultiXactIdPrecedes(multi, cutoffs->relminmxid))
6732 0 : ereport(ERROR,
6733 : (errcode(ERRCODE_DATA_CORRUPTED),
6734 : errmsg_internal("found multixact %u from before relminmxid %u",
6735 : multi, cutoffs->relminmxid)));
6736 14 : else if (MultiXactIdPrecedes(multi, cutoffs->OldestMxact))
6737 : {
6738 : TransactionId update_xact;
6739 :
6740 : /*
6741 : * This old multi cannot possibly have members still running, but
6742 : * verify just in case. If it was a locker only, it can be removed
6743 : * without any further consideration; but if it contained an update,
6744 : * we might need to preserve it.
6745 : */
6746 10 : if (MultiXactIdIsRunning(multi,
6747 10 : HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
6748 0 : ereport(ERROR,
6749 : (errcode(ERRCODE_DATA_CORRUPTED),
6750 : errmsg_internal("multixact %u from before multi freeze cutoff %u found to be still running",
6751 : multi, cutoffs->OldestMxact)));
6752 :
6753 10 : if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
6754 : {
6755 10 : *flags |= FRM_INVALIDATE_XMAX;
6756 10 : pagefrz->freeze_required = true;
6757 10 : return InvalidTransactionId;
6758 : }
6759 :
6760 : /* replace multi with single XID for its updater? */
6761 0 : update_xact = MultiXactIdGetUpdateXid(multi, t_infomask);
6762 0 : if (TransactionIdPrecedes(update_xact, cutoffs->relfrozenxid))
6763 0 : ereport(ERROR,
6764 : (errcode(ERRCODE_DATA_CORRUPTED),
6765 : errmsg_internal("multixact %u contains update XID %u from before relfrozenxid %u",
6766 : multi, update_xact,
6767 : cutoffs->relfrozenxid)));
6768 0 : else if (TransactionIdPrecedes(update_xact, cutoffs->OldestXmin))
6769 : {
6770 : /*
6771 : * Updater XID has to have aborted (otherwise the tuple would have
6772 : * been pruned away instead, since updater XID is < OldestXmin).
6773 : * Just remove xmax.
6774 : */
6775 0 : if (TransactionIdDidCommit(update_xact))
6776 0 : ereport(ERROR,
6777 : (errcode(ERRCODE_DATA_CORRUPTED),
6778 : errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
6779 : multi, update_xact,
6780 : cutoffs->OldestXmin)));
6781 0 : *flags |= FRM_INVALIDATE_XMAX;
6782 0 : pagefrz->freeze_required = true;
6783 0 : return InvalidTransactionId;
6784 : }
6785 :
6786 : /* Have to keep updater XID as new xmax */
6787 0 : *flags |= FRM_RETURN_IS_XID;
6788 0 : pagefrz->freeze_required = true;
6789 0 : return update_xact;
6790 : }
6791 :
6792 : /*
6793 : * Some member(s) of this Multi may be below FreezeLimit xid cutoff, so we
6794 : * need to walk the whole members array to figure out what to do, if
6795 : * anything.
6796 : */
6797 : nmembers =
6798 4 : GetMultiXactIdMembers(multi, &members, false,
6799 4 : HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
6800 4 : if (nmembers <= 0)
6801 : {
6802 : /* Nothing worth keeping */
6803 0 : *flags |= FRM_INVALIDATE_XMAX;
6804 0 : pagefrz->freeze_required = true;
6805 0 : return InvalidTransactionId;
6806 : }
6807 :
6808 : /*
6809 : * The FRM_NOOP case is the only case where we might need to ratchet back
6810 : * FreezePageRelfrozenXid or FreezePageRelminMxid. It is also the only
6811 : * case where our caller might ratchet back its NoFreezePageRelfrozenXid
6812 : * or NoFreezePageRelminMxid "no freeze" trackers to deal with a multi.
6813 : * FRM_NOOP handling should result in the NewRelfrozenXid/NewRelminMxid
6814 : * trackers managed by VACUUM being ratcheting back by xmax to the degree
6815 : * required to make it safe to leave xmax undisturbed, independent of
6816 : * whether or not page freezing is triggered somewhere else.
6817 : *
6818 : * Our policy is to force freezing in every case other than FRM_NOOP,
6819 : * which obviates the need to maintain either set of trackers, anywhere.
6820 : * Every other case will reliably execute a freeze plan for xmax that
6821 : * either replaces xmax with an XID/MXID >= OldestXmin/OldestMxact, or
6822 : * sets xmax to an InvalidTransactionId XID, rendering xmax fully frozen.
6823 : * (VACUUM's NewRelfrozenXid/NewRelminMxid trackers are initialized with
6824 : * OldestXmin/OldestMxact, so later values never need to be tracked here.)
6825 : */
6826 4 : need_replace = false;
6827 4 : FreezePageRelfrozenXid = pagefrz->FreezePageRelfrozenXid;
6828 8 : for (int i = 0; i < nmembers; i++)
6829 : {
6830 6 : TransactionId xid = members[i].xid;
6831 :
6832 : Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
6833 :
6834 6 : if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
6835 : {
6836 : /* Can't violate the FreezeLimit postcondition */
6837 2 : need_replace = true;
6838 2 : break;
6839 : }
6840 4 : if (TransactionIdPrecedes(xid, FreezePageRelfrozenXid))
6841 0 : FreezePageRelfrozenXid = xid;
6842 : }
6843 :
6844 : /* Can't violate the MultiXactCutoff postcondition, either */
6845 4 : if (!need_replace)
6846 2 : need_replace = MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff);
6847 :
6848 4 : if (!need_replace)
6849 : {
6850 : /*
6851 : * vacuumlazy.c might ratchet back NewRelminMxid, NewRelfrozenXid, or
6852 : * both together to make it safe to retain this particular multi after
6853 : * freezing its page
6854 : */
6855 2 : *flags |= FRM_NOOP;
6856 2 : pagefrz->FreezePageRelfrozenXid = FreezePageRelfrozenXid;
6857 2 : if (MultiXactIdPrecedes(multi, pagefrz->FreezePageRelminMxid))
6858 0 : pagefrz->FreezePageRelminMxid = multi;
6859 2 : pfree(members);
6860 2 : return multi;
6861 : }
6862 :
6863 : /*
6864 : * Do a more thorough second pass over the multi to figure out which
6865 : * member XIDs actually need to be kept. Checking the precise status of
6866 : * individual members might even show that we don't need to keep anything.
6867 : * That is quite possible even though the Multi must be >= OldestMxact,
6868 : * since our second pass only keeps member XIDs when it's truly necessary;
6869 : * even member XIDs >= OldestXmin often won't be kept by second pass.
6870 : */
6871 2 : nnewmembers = 0;
6872 2 : newmembers = palloc(sizeof(MultiXactMember) * nmembers);
6873 2 : has_lockers = false;
6874 2 : update_xid = InvalidTransactionId;
6875 2 : update_committed = false;
6876 :
6877 : /*
6878 : * Determine whether to keep each member xid, or to ignore it instead
6879 : */
6880 6 : for (int i = 0; i < nmembers; i++)
6881 : {
6882 4 : TransactionId xid = members[i].xid;
6883 4 : MultiXactStatus mstatus = members[i].status;
6884 :
6885 : Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
6886 :
6887 4 : if (!ISUPDATE_from_mxstatus(mstatus))
6888 : {
6889 : /*
6890 : * Locker XID (not updater XID). We only keep lockers that are
6891 : * still running.
6892 : */
6893 8 : if (TransactionIdIsCurrentTransactionId(xid) ||
6894 4 : TransactionIdIsInProgress(xid))
6895 : {
6896 2 : if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
6897 0 : ereport(ERROR,
6898 : (errcode(ERRCODE_DATA_CORRUPTED),
6899 : errmsg_internal("multixact %u contains running locker XID %u from before removable cutoff %u",
6900 : multi, xid,
6901 : cutoffs->OldestXmin)));
6902 2 : newmembers[nnewmembers++] = members[i];
6903 2 : has_lockers = true;
6904 : }
6905 :
6906 4 : continue;
6907 : }
6908 :
6909 : /*
6910 : * Updater XID (not locker XID). Should we keep it?
6911 : *
6912 : * Since the tuple wasn't totally removed when vacuum pruned, the
6913 : * update Xid cannot possibly be older than OldestXmin cutoff unless
6914 : * the updater XID aborted. If the updater transaction is known
6915 : * aborted or crashed then it's okay to ignore it, otherwise not.
6916 : *
6917 : * In any case the Multi should never contain two updaters, whatever
6918 : * their individual commit status. Check for that first, in passing.
6919 : */
6920 0 : if (TransactionIdIsValid(update_xid))
6921 0 : ereport(ERROR,
6922 : (errcode(ERRCODE_DATA_CORRUPTED),
6923 : errmsg_internal("multixact %u has two or more updating members",
6924 : multi),
6925 : errdetail_internal("First updater XID=%u second updater XID=%u.",
6926 : update_xid, xid)));
6927 :
6928 : /*
6929 : * As with all tuple visibility routines, it's critical to test
6930 : * TransactionIdIsInProgress before TransactionIdDidCommit, because of
6931 : * race conditions explained in detail in heapam_visibility.c.
6932 : */
6933 0 : if (TransactionIdIsCurrentTransactionId(xid) ||
6934 0 : TransactionIdIsInProgress(xid))
6935 0 : update_xid = xid;
6936 0 : else if (TransactionIdDidCommit(xid))
6937 : {
6938 : /*
6939 : * The transaction committed, so we can tell caller to set
6940 : * HEAP_XMAX_COMMITTED. (We can only do this because we know the
6941 : * transaction is not running.)
6942 : */
6943 0 : update_committed = true;
6944 0 : update_xid = xid;
6945 : }
6946 : else
6947 : {
6948 : /*
6949 : * Not in progress, not committed -- must be aborted or crashed;
6950 : * we can ignore it.
6951 : */
6952 0 : continue;
6953 : }
6954 :
6955 : /*
6956 : * We determined that updater must be kept -- add it to pending new
6957 : * members list
6958 : */
6959 0 : if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
6960 0 : ereport(ERROR,
6961 : (errcode(ERRCODE_DATA_CORRUPTED),
6962 : errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
6963 : multi, xid, cutoffs->OldestXmin)));
6964 0 : newmembers[nnewmembers++] = members[i];
6965 : }
6966 :
6967 2 : pfree(members);
6968 :
6969 : /*
6970 : * Determine what to do with caller's multi based on information gathered
6971 : * during our second pass
6972 : */
6973 2 : if (nnewmembers == 0)
6974 : {
6975 : /* Nothing worth keeping */
6976 0 : *flags |= FRM_INVALIDATE_XMAX;
6977 0 : newxmax = InvalidTransactionId;
6978 : }
6979 2 : else if (TransactionIdIsValid(update_xid) && !has_lockers)
6980 : {
6981 : /*
6982 : * If there's a single member and it's an update, pass it back alone
6983 : * without creating a new Multi. (XXX we could do this when there's a
6984 : * single remaining locker, too, but that would complicate the API too
6985 : * much; moreover, the case with the single updater is more
6986 : * interesting, because those are longer-lived.)
6987 : */
6988 : Assert(nnewmembers == 1);
6989 0 : *flags |= FRM_RETURN_IS_XID;
6990 0 : if (update_committed)
6991 0 : *flags |= FRM_MARK_COMMITTED;
6992 0 : newxmax = update_xid;
6993 : }
6994 : else
6995 : {
6996 : /*
6997 : * Create a new multixact with the surviving members of the previous
6998 : * one, to set as new Xmax in the tuple
6999 : */
7000 2 : newxmax = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
7001 2 : *flags |= FRM_RETURN_IS_MULTI;
7002 : }
7003 :
7004 2 : pfree(newmembers);
7005 :
7006 2 : pagefrz->freeze_required = true;
7007 2 : return newxmax;
7008 : }
7009 :
7010 : /*
7011 : * heap_prepare_freeze_tuple
7012 : *
7013 : * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
7014 : * are older than the OldestXmin and/or OldestMxact freeze cutoffs. If so,
7015 : * setup enough state (in the *frz output argument) to enable caller to
7016 : * process this tuple as part of freezing its page, and return true. Return
7017 : * false if nothing can be changed about the tuple right now.
7018 : *
7019 : * Also sets *totally_frozen to true if the tuple will be totally frozen once
7020 : * caller executes returned freeze plan (or if the tuple was already totally
7021 : * frozen by an earlier VACUUM). This indicates that there are no remaining
7022 : * XIDs or MultiXactIds that will need to be processed by a future VACUUM.
7023 : *
7024 : * VACUUM caller must assemble HeapTupleFreeze freeze plan entries for every
7025 : * tuple that we returned true for, and then execute freezing. Caller must
7026 : * initialize pagefrz fields for page as a whole before first call here for
7027 : * each heap page.
7028 : *
7029 : * VACUUM caller decides on whether or not to freeze the page as a whole.
7030 : * We'll often prepare freeze plans for a page that caller just discards.
7031 : * However, VACUUM doesn't always get to make a choice; it must freeze when
7032 : * pagefrz.freeze_required is set, to ensure that any XIDs < FreezeLimit (and
7033 : * MXIDs < MultiXactCutoff) can never be left behind. We help to make sure
7034 : * that VACUUM always follows that rule.
7035 : *
7036 : * We sometimes force freezing of xmax MultiXactId values long before it is
7037 : * strictly necessary to do so just to ensure the FreezeLimit postcondition.
7038 : * It's worth processing MultiXactIds proactively when it is cheap to do so,
7039 : * and it's convenient to make that happen by piggy-backing it on the "force
7040 : * freezing" mechanism. Conversely, we sometimes delay freezing MultiXactIds
7041 : * because it is expensive right now (though only when it's still possible to
7042 : * do so without violating the FreezeLimit/MultiXactCutoff postcondition).
7043 : *
7044 : * It is assumed that the caller has checked the tuple with
7045 : * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
7046 : * (else we should be removing the tuple, not freezing it).
7047 : *
7048 : * NB: This function has side effects: it might allocate a new MultiXactId.
7049 : * It will be set as tuple's new xmax when our *frz output is processed within
7050 : * heap_execute_freeze_tuple later on. If the tuple is in a shared buffer
7051 : * then caller had better have an exclusive lock on it already.
7052 : */
7053 : bool
7054 55880644 : heap_prepare_freeze_tuple(HeapTupleHeader tuple,
7055 : const struct VacuumCutoffs *cutoffs,
7056 : HeapPageFreeze *pagefrz,
7057 : HeapTupleFreeze *frz, bool *totally_frozen)
7058 : {
7059 55880644 : bool xmin_already_frozen = false,
7060 55880644 : xmax_already_frozen = false;
7061 55880644 : bool freeze_xmin = false,
7062 55880644 : replace_xvac = false,
7063 55880644 : replace_xmax = false,
7064 55880644 : freeze_xmax = false;
7065 : TransactionId xid;
7066 :
7067 55880644 : frz->xmax = HeapTupleHeaderGetRawXmax(tuple);
7068 55880644 : frz->t_infomask2 = tuple->t_infomask2;
7069 55880644 : frz->t_infomask = tuple->t_infomask;
7070 55880644 : frz->frzflags = 0;
7071 55880644 : frz->checkflags = 0;
7072 :
7073 : /*
7074 : * Process xmin, while keeping track of whether it's already frozen, or
7075 : * will become frozen iff our freeze plan is executed by caller (could be
7076 : * neither).
7077 : */
7078 55880644 : xid = HeapTupleHeaderGetXmin(tuple);
7079 55880644 : if (!TransactionIdIsNormal(xid))
7080 49936302 : xmin_already_frozen = true;
7081 : else
7082 : {
7083 5944342 : if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
7084 0 : ereport(ERROR,
7085 : (errcode(ERRCODE_DATA_CORRUPTED),
7086 : errmsg_internal("found xmin %u from before relfrozenxid %u",
7087 : xid, cutoffs->relfrozenxid)));
7088 :
7089 : /* Will set freeze_xmin flags in freeze plan below */
7090 5944342 : freeze_xmin = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
7091 :
7092 : /* Verify that xmin committed if and when freeze plan is executed */
7093 5944342 : if (freeze_xmin)
7094 4601584 : frz->checkflags |= HEAP_FREEZE_CHECK_XMIN_COMMITTED;
7095 : }
7096 :
7097 : /*
7098 : * Old-style VACUUM FULL is gone, but we have to process xvac for as long
7099 : * as we support having MOVED_OFF/MOVED_IN tuples in the database
7100 : */
7101 55880644 : xid = HeapTupleHeaderGetXvac(tuple);
7102 55880644 : if (TransactionIdIsNormal(xid))
7103 : {
7104 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7105 : Assert(TransactionIdPrecedes(xid, cutoffs->OldestXmin));
7106 :
7107 : /*
7108 : * For Xvac, we always freeze proactively. This allows totally_frozen
7109 : * tracking to ignore xvac.
7110 : */
7111 0 : replace_xvac = pagefrz->freeze_required = true;
7112 :
7113 : /* Will set replace_xvac flags in freeze plan below */
7114 : }
7115 :
7116 : /* Now process xmax */
7117 55880644 : xid = frz->xmax;
7118 55880644 : if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7119 : {
7120 : /* Raw xmax is a MultiXactId */
7121 : TransactionId newxmax;
7122 : uint16 flags;
7123 :
7124 : /*
7125 : * We will either remove xmax completely (in the "freeze_xmax" path),
7126 : * process xmax by replacing it (in the "replace_xmax" path), or
7127 : * perform no-op xmax processing. The only constraint is that the
7128 : * FreezeLimit/MultiXactCutoff postcondition must never be violated.
7129 : */
7130 14 : newxmax = FreezeMultiXactId(xid, tuple->t_infomask, cutoffs,
7131 : &flags, pagefrz);
7132 :
7133 14 : if (flags & FRM_NOOP)
7134 : {
7135 : /*
7136 : * xmax is a MultiXactId, and nothing about it changes for now.
7137 : * This is the only case where 'freeze_required' won't have been
7138 : * set for us by FreezeMultiXactId, as well as the only case where
7139 : * neither freeze_xmax nor replace_xmax are set (given a multi).
7140 : *
7141 : * This is a no-op, but the call to FreezeMultiXactId might have
7142 : * ratcheted back NewRelfrozenXid and/or NewRelminMxid trackers
7143 : * for us (the "freeze page" variants, specifically). That'll
7144 : * make it safe for our caller to freeze the page later on, while
7145 : * leaving this particular xmax undisturbed.
7146 : *
7147 : * FreezeMultiXactId is _not_ responsible for the "no freeze"
7148 : * NewRelfrozenXid/NewRelminMxid trackers, though -- that's our
7149 : * job. A call to heap_tuple_should_freeze for this same tuple
7150 : * will take place below if 'freeze_required' isn't set already.
7151 : * (This repeats work from FreezeMultiXactId, but allows "no
7152 : * freeze" tracker maintenance to happen in only one place.)
7153 : */
7154 : Assert(!MultiXactIdPrecedes(newxmax, cutoffs->MultiXactCutoff));
7155 : Assert(MultiXactIdIsValid(newxmax) && xid == newxmax);
7156 : }
7157 12 : else if (flags & FRM_RETURN_IS_XID)
7158 : {
7159 : /*
7160 : * xmax will become an updater Xid (original MultiXact's updater
7161 : * member Xid will be carried forward as a simple Xid in Xmax).
7162 : */
7163 : Assert(!TransactionIdPrecedes(newxmax, cutoffs->OldestXmin));
7164 :
7165 : /*
7166 : * NB -- some of these transformations are only valid because we
7167 : * know the return Xid is a tuple updater (i.e. not merely a
7168 : * locker.) Also note that the only reason we don't explicitly
7169 : * worry about HEAP_KEYS_UPDATED is because it lives in
7170 : * t_infomask2 rather than t_infomask.
7171 : */
7172 0 : frz->t_infomask &= ~HEAP_XMAX_BITS;
7173 0 : frz->xmax = newxmax;
7174 0 : if (flags & FRM_MARK_COMMITTED)
7175 0 : frz->t_infomask |= HEAP_XMAX_COMMITTED;
7176 0 : replace_xmax = true;
7177 : }
7178 12 : else if (flags & FRM_RETURN_IS_MULTI)
7179 : {
7180 : uint16 newbits;
7181 : uint16 newbits2;
7182 :
7183 : /*
7184 : * xmax is an old MultiXactId that we have to replace with a new
7185 : * MultiXactId, to carry forward two or more original member XIDs.
7186 : */
7187 : Assert(!MultiXactIdPrecedes(newxmax, cutoffs->OldestMxact));
7188 :
7189 : /*
7190 : * We can't use GetMultiXactIdHintBits directly on the new multi
7191 : * here; that routine initializes the masks to all zeroes, which
7192 : * would lose other bits we need. Doing it this way ensures all
7193 : * unrelated bits remain untouched.
7194 : */
7195 2 : frz->t_infomask &= ~HEAP_XMAX_BITS;
7196 2 : frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7197 2 : GetMultiXactIdHintBits(newxmax, &newbits, &newbits2);
7198 2 : frz->t_infomask |= newbits;
7199 2 : frz->t_infomask2 |= newbits2;
7200 2 : frz->xmax = newxmax;
7201 2 : replace_xmax = true;
7202 : }
7203 : else
7204 : {
7205 : /*
7206 : * Freeze plan for tuple "freezes xmax" in the strictest sense:
7207 : * it'll leave nothing in xmax (neither an Xid nor a MultiXactId).
7208 : */
7209 : Assert(flags & FRM_INVALIDATE_XMAX);
7210 : Assert(!TransactionIdIsValid(newxmax));
7211 :
7212 : /* Will set freeze_xmax flags in freeze plan below */
7213 10 : freeze_xmax = true;
7214 : }
7215 :
7216 : /* MultiXactId processing forces freezing (barring FRM_NOOP case) */
7217 : Assert(pagefrz->freeze_required || (!freeze_xmax && !replace_xmax));
7218 : }
7219 55880630 : else if (TransactionIdIsNormal(xid))
7220 : {
7221 : /* Raw xmax is normal XID */
7222 20375066 : if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
7223 0 : ereport(ERROR,
7224 : (errcode(ERRCODE_DATA_CORRUPTED),
7225 : errmsg_internal("found xmax %u from before relfrozenxid %u",
7226 : xid, cutoffs->relfrozenxid)));
7227 :
7228 : /* Will set freeze_xmax flags in freeze plan below */
7229 20375066 : freeze_xmax = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
7230 :
7231 : /*
7232 : * Verify that xmax aborted if and when freeze plan is executed,
7233 : * provided it's from an update. (A lock-only xmax can be removed
7234 : * independent of this, since the lock is released at xact end.)
7235 : */
7236 20375066 : if (freeze_xmax && !HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
7237 2686 : frz->checkflags |= HEAP_FREEZE_CHECK_XMAX_ABORTED;
7238 : }
7239 35505564 : else if (!TransactionIdIsValid(xid))
7240 : {
7241 : /* Raw xmax is InvalidTransactionId XID */
7242 : Assert((tuple->t_infomask & HEAP_XMAX_IS_MULTI) == 0);
7243 35505564 : xmax_already_frozen = true;
7244 : }
7245 : else
7246 0 : ereport(ERROR,
7247 : (errcode(ERRCODE_DATA_CORRUPTED),
7248 : errmsg_internal("found raw xmax %u (infomask 0x%04x) not invalid and not multi",
7249 : xid, tuple->t_infomask)));
7250 :
7251 55880644 : if (freeze_xmin)
7252 : {
7253 : Assert(!xmin_already_frozen);
7254 :
7255 4601584 : frz->t_infomask |= HEAP_XMIN_FROZEN;
7256 : }
7257 55880644 : if (replace_xvac)
7258 : {
7259 : /*
7260 : * If a MOVED_OFF tuple is not dead, the xvac transaction must have
7261 : * failed; whereas a non-dead MOVED_IN tuple must mean the xvac
7262 : * transaction succeeded.
7263 : */
7264 : Assert(pagefrz->freeze_required);
7265 0 : if (tuple->t_infomask & HEAP_MOVED_OFF)
7266 0 : frz->frzflags |= XLH_INVALID_XVAC;
7267 : else
7268 0 : frz->frzflags |= XLH_FREEZE_XVAC;
7269 : }
7270 : if (replace_xmax)
7271 : {
7272 : Assert(!xmax_already_frozen && !freeze_xmax);
7273 : Assert(pagefrz->freeze_required);
7274 :
7275 : /* Already set replace_xmax flags in freeze plan earlier */
7276 : }
7277 55880644 : if (freeze_xmax)
7278 : {
7279 : Assert(!xmax_already_frozen && !replace_xmax);
7280 :
7281 4606 : frz->xmax = InvalidTransactionId;
7282 :
7283 : /*
7284 : * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
7285 : * LOCKED. Normalize to INVALID just to be sure no one gets confused.
7286 : * Also get rid of the HEAP_KEYS_UPDATED bit.
7287 : */
7288 4606 : frz->t_infomask &= ~HEAP_XMAX_BITS;
7289 4606 : frz->t_infomask |= HEAP_XMAX_INVALID;
7290 4606 : frz->t_infomask2 &= ~HEAP_HOT_UPDATED;
7291 4606 : frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7292 : }
7293 :
7294 : /*
7295 : * Determine if this tuple is already totally frozen, or will become
7296 : * totally frozen (provided caller executes freeze plans for the page)
7297 : */
7298 110413924 : *totally_frozen = ((freeze_xmin || xmin_already_frozen) &&
7299 54533280 : (freeze_xmax || xmax_already_frozen));
7300 :
7301 55880644 : if (!pagefrz->freeze_required && !(xmin_already_frozen &&
7302 : xmax_already_frozen))
7303 : {
7304 : /*
7305 : * So far no previous tuple from the page made freezing mandatory.
7306 : * Does this tuple force caller to freeze the entire page?
7307 : */
7308 23261442 : pagefrz->freeze_required =
7309 23261442 : heap_tuple_should_freeze(tuple, cutoffs,
7310 : &pagefrz->NoFreezePageRelfrozenXid,
7311 : &pagefrz->NoFreezePageRelminMxid);
7312 : }
7313 :
7314 : /* Tell caller if this tuple has a usable freeze plan set in *frz */
7315 55880644 : return freeze_xmin || replace_xvac || replace_xmax || freeze_xmax;
7316 : }
7317 :
7318 : /*
7319 : * Perform xmin/xmax XID status sanity checks before actually executing freeze
7320 : * plans.
7321 : *
7322 : * heap_prepare_freeze_tuple doesn't perform these checks directly because
7323 : * pg_xact lookups are relatively expensive. They shouldn't be repeated by
7324 : * successive VACUUMs that each decide against freezing the same page.
7325 : */
7326 : void
7327 45766 : heap_pre_freeze_checks(Buffer buffer,
7328 : HeapTupleFreeze *tuples, int ntuples)
7329 : {
7330 45766 : Page page = BufferGetPage(buffer);
7331 :
7332 1956886 : for (int i = 0; i < ntuples; i++)
7333 : {
7334 1911120 : HeapTupleFreeze *frz = tuples + i;
7335 1911120 : ItemId itemid = PageGetItemId(page, frz->offset);
7336 : HeapTupleHeader htup;
7337 :
7338 1911120 : htup = (HeapTupleHeader) PageGetItem(page, itemid);
7339 :
7340 : /* Deliberately avoid relying on tuple hint bits here */
7341 1911120 : if (frz->checkflags & HEAP_FREEZE_CHECK_XMIN_COMMITTED)
7342 : {
7343 1911118 : TransactionId xmin = HeapTupleHeaderGetRawXmin(htup);
7344 :
7345 : Assert(!HeapTupleHeaderXminFrozen(htup));
7346 1911118 : if (unlikely(!TransactionIdDidCommit(xmin)))
7347 0 : ereport(ERROR,
7348 : (errcode(ERRCODE_DATA_CORRUPTED),
7349 : errmsg_internal("uncommitted xmin %u needs to be frozen",
7350 : xmin)));
7351 : }
7352 :
7353 : /*
7354 : * TransactionIdDidAbort won't work reliably in the presence of XIDs
7355 : * left behind by transactions that were in progress during a crash,
7356 : * so we can only check that xmax didn't commit
7357 : */
7358 1911120 : if (frz->checkflags & HEAP_FREEZE_CHECK_XMAX_ABORTED)
7359 : {
7360 866 : TransactionId xmax = HeapTupleHeaderGetRawXmax(htup);
7361 :
7362 : Assert(TransactionIdIsNormal(xmax));
7363 866 : if (unlikely(TransactionIdDidCommit(xmax)))
7364 0 : ereport(ERROR,
7365 : (errcode(ERRCODE_DATA_CORRUPTED),
7366 : errmsg_internal("cannot freeze committed xmax %u",
7367 : xmax)));
7368 : }
7369 : }
7370 45766 : }
7371 :
7372 : /*
7373 : * Helper which executes freezing of one or more heap tuples on a page on
7374 : * behalf of caller. Caller passes an array of tuple plans from
7375 : * heap_prepare_freeze_tuple. Caller must set 'offset' in each plan for us.
7376 : * Must be called in a critical section that also marks the buffer dirty and,
7377 : * if needed, emits WAL.
7378 : */
7379 : void
7380 45766 : heap_freeze_prepared_tuples(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
7381 : {
7382 45766 : Page page = BufferGetPage(buffer);
7383 :
7384 1956886 : for (int i = 0; i < ntuples; i++)
7385 : {
7386 1911120 : HeapTupleFreeze *frz = tuples + i;
7387 1911120 : ItemId itemid = PageGetItemId(page, frz->offset);
7388 : HeapTupleHeader htup;
7389 :
7390 1911120 : htup = (HeapTupleHeader) PageGetItem(page, itemid);
7391 1911120 : heap_execute_freeze_tuple(htup, frz);
7392 : }
7393 45766 : }
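
/*
 * Illustrative sketch only: the order in which VACUUM is expected to drive
 * the two routines above for one heap page, per their header comments.  The
 * WAL record itself is elided, since its exact shape is a vacuumlazy.c
 * detail.
 */
static void
example_execute_freeze_plans(Relation rel, Buffer buf,
                             HeapTupleFreeze *plans, int nplans)
{
    if (nplans == 0)
        return;

    /* expensive pg_xact lookups happen once, outside the critical section */
    heap_pre_freeze_checks(buf, plans, nplans);

    START_CRIT_SECTION();

    heap_freeze_prepared_tuples(buf, plans, nplans);
    MarkBufferDirty(buf);

    if (RelationNeedsWAL(rel))
    {
        /* emit the prune/freeze WAL record for "buf" here (elided) */
    }

    END_CRIT_SECTION();
}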
7394 :
7395 : /*
7396 : * heap_freeze_tuple
7397 : * Freeze tuple in place, without WAL logging.
7398 : *
7399 : * Useful for callers like CLUSTER that perform their own WAL logging.
7400 : */
7401 : bool
7402 727912 : heap_freeze_tuple(HeapTupleHeader tuple,
7403 : TransactionId relfrozenxid, TransactionId relminmxid,
7404 : TransactionId FreezeLimit, TransactionId MultiXactCutoff)
7405 : {
7406 : HeapTupleFreeze frz;
7407 : bool do_freeze;
7408 : bool totally_frozen;
7409 : struct VacuumCutoffs cutoffs;
7410 : HeapPageFreeze pagefrz;
7411 :
7412 727912 : cutoffs.relfrozenxid = relfrozenxid;
7413 727912 : cutoffs.relminmxid = relminmxid;
7414 727912 : cutoffs.OldestXmin = FreezeLimit;
7415 727912 : cutoffs.OldestMxact = MultiXactCutoff;
7416 727912 : cutoffs.FreezeLimit = FreezeLimit;
7417 727912 : cutoffs.MultiXactCutoff = MultiXactCutoff;
7418 :
7419 727912 : pagefrz.freeze_required = true;
7420 727912 : pagefrz.FreezePageRelfrozenXid = FreezeLimit;
7421 727912 : pagefrz.FreezePageRelminMxid = MultiXactCutoff;
7422 727912 : pagefrz.NoFreezePageRelfrozenXid = FreezeLimit;
7423 727912 : pagefrz.NoFreezePageRelminMxid = MultiXactCutoff;
7424 :
7425 727912 : do_freeze = heap_prepare_freeze_tuple(tuple, &cutoffs,
7426 : &pagefrz, &frz, &totally_frozen);
7427 :
7428 : /*
7429 : * Note that because this is not a WAL-logged operation, we don't need to
7430 : * fill in the offset in the freeze record.
7431 : */
7432 :
7433 727912 : if (do_freeze)
7434 513514 : heap_execute_freeze_tuple(tuple, &frz);
7435 727912 : return do_freeze;
7436 : }
7437 :
7438 : /*
7439 : * For a given MultiXactId, return the hint bits that should be set in the
7440 : * tuple's infomask.
7441 : *
7442 : * Normally this should be called for a multixact that was just created, and
7443 : * so is on our local cache, so the GetMembers call is fast.
7444 : */
7445 : static void
7446 2376 : GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
7447 : uint16 *new_infomask2)
7448 : {
7449 : int nmembers;
7450 : MultiXactMember *members;
7451 : int i;
7452 2376 : uint16 bits = HEAP_XMAX_IS_MULTI;
7453 2376 : uint16 bits2 = 0;
7454 2376 : bool has_update = false;
7455 2376 : LockTupleMode strongest = LockTupleKeyShare;
7456 :
7457 : /*
7458 : * We only use this in multis we just created, so they cannot be values
7459 : * pre-pg_upgrade.
7460 : */
7461 2376 : nmembers = GetMultiXactIdMembers(multi, &members, false, false);
7462 :
7463 7266 : for (i = 0; i < nmembers; i++)
7464 : {
7465 : LockTupleMode mode;
7466 :
7467 : /*
7468 : * Remember the strongest lock mode held by any member of the
7469 : * multixact.
7470 : */
7471 4890 : mode = TUPLOCK_from_mxstatus(members[i].status);
7472 4890 : if (mode > strongest)
7473 1320 : strongest = mode;
7474 :
7475 : /* See what other bits we need */
7476 4890 : switch (members[i].status)
7477 : {
7478 4508 : case MultiXactStatusForKeyShare:
7479 : case MultiXactStatusForShare:
7480 : case MultiXactStatusForNoKeyUpdate:
7481 4508 : break;
7482 :
7483 104 : case MultiXactStatusForUpdate:
7484 104 : bits2 |= HEAP_KEYS_UPDATED;
7485 104 : break;
7486 :
7487 258 : case MultiXactStatusNoKeyUpdate:
7488 258 : has_update = true;
7489 258 : break;
7490 :
7491 20 : case MultiXactStatusUpdate:
7492 20 : bits2 |= HEAP_KEYS_UPDATED;
7493 20 : has_update = true;
7494 20 : break;
7495 : }
7496 : }
7497 :
7498 2376 : if (strongest == LockTupleExclusive ||
7499 : strongest == LockTupleNoKeyExclusive)
7500 438 : bits |= HEAP_XMAX_EXCL_LOCK;
7501 1938 : else if (strongest == LockTupleShare)
7502 876 : bits |= HEAP_XMAX_SHR_LOCK;
7503 1062 : else if (strongest == LockTupleKeyShare)
7504 1062 : bits |= HEAP_XMAX_KEYSHR_LOCK;
7505 :
7506 2376 : if (!has_update)
7507 2098 : bits |= HEAP_XMAX_LOCK_ONLY;
7508 :
7509 2376 : if (nmembers > 0)
7510 2376 : pfree(members);
7511 :
7512 2376 : *new_infomask = bits;
7513 2376 : *new_infomask2 = bits2;
7514 2376 : }
7515 :
7516 : /*
7517 : * MultiXactIdGetUpdateXid
7518 : *
7519 : * Given a multixact Xmax and corresponding infomask, which does not have the
7520 : * HEAP_XMAX_LOCK_ONLY bit set, obtain and return the Xid of the updating
7521 : * transaction.
7522 : *
7523 : * Caller is expected to check the status of the updating transaction, if
7524 : * necessary.
7525 : */
7526 : static TransactionId
7527 980 : MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
7528 : {
7529 980 : TransactionId update_xact = InvalidTransactionId;
7530 : MultiXactMember *members;
7531 : int nmembers;
7532 :
7533 : Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
7534 : Assert(t_infomask & HEAP_XMAX_IS_MULTI);
7535 :
7536 : /*
7537 : * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
7538 : * pre-pg_upgrade.
7539 : */
7540 980 : nmembers = GetMultiXactIdMembers(xmax, &members, false, false);
7541 :
7542 980 : if (nmembers > 0)
7543 : {
7544 : int i;
7545 :
7546 2548 : for (i = 0; i < nmembers; i++)
7547 : {
7548 : /* Ignore lockers */
7549 2548 : if (!ISUPDATE_from_mxstatus(members[i].status))
7550 1568 : continue;
7551 :
7552 : /* there can be at most one updater */
7553 : Assert(update_xact == InvalidTransactionId);
7554 980 : update_xact = members[i].xid;
7555 : #ifndef USE_ASSERT_CHECKING
7556 :
7557 : /*
7558 : * in an assert-enabled build, walk the whole array to ensure
7559 : * there's no other updater.
7560 : */
7561 980 : break;
7562 : #endif
7563 : }
7564 :
7565 980 : pfree(members);
7566 : }
7567 :
7568 980 : return update_xact;
7569 : }
7570 :
7571 : /*
7572 : * HeapTupleGetUpdateXid
7573 : * As above, but use a HeapTupleHeader
7574 : *
7575 : * See also HeapTupleHeaderGetUpdateXid, which can be used without previously
7576 : * checking the hint bits.
7577 : */
7578 : TransactionId
7579 964 : HeapTupleGetUpdateXid(const HeapTupleHeaderData *tup)
7580 : {
7581 964 : return MultiXactIdGetUpdateXid(HeapTupleHeaderGetRawXmax(tup),
7582 964 : tup->t_infomask);
7583 : }
7584 :
7585 : /*
7586 : * Does the given multixact conflict with the current transaction grabbing a
7587 : * tuple lock of the given strength?
7588 : *
7589 : * The passed infomask pairs up with the given multixact in the tuple header.
7590 : *
7591 : * If current_is_member is not NULL, it is set to 'true' if the current
7592 : * transaction is a member of the given multixact.
7593 : */
7594 : static bool
7595 198 : DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
7596 : LockTupleMode lockmode, bool *current_is_member)
7597 : {
7598 : int nmembers;
7599 : MultiXactMember *members;
7600 198 : bool result = false;
7601 198 : LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
7602 :
7603 198 : if (HEAP_LOCKED_UPGRADED(infomask))
7604 0 : return false;
7605 :
7606 198 : nmembers = GetMultiXactIdMembers(multi, &members, false,
7607 198 : HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7608 198 : if (nmembers >= 0)
7609 : {
7610 : int i;
7611 :
7612 618 : for (i = 0; i < nmembers; i++)
7613 : {
7614 : TransactionId memxid;
7615 : LOCKMODE memlockmode;
7616 :
7617 434 : if (result && (current_is_member == NULL || *current_is_member))
7618 : break;
7619 :
7620 420 : memlockmode = LOCKMODE_from_mxstatus(members[i].status);
7621 :
7622 : /* ignore members from current xact (but track their presence) */
7623 420 : memxid = members[i].xid;
7624 420 : if (TransactionIdIsCurrentTransactionId(memxid))
7625 : {
7626 184 : if (current_is_member != NULL)
7627 156 : *current_is_member = true;
7628 184 : continue;
7629 : }
7630 236 : else if (result)
7631 16 : continue;
7632 :
7633 : /* ignore members that don't conflict with the lock we want */
7634 220 : if (!DoLockModesConflict(memlockmode, wanted))
7635 142 : continue;
7636 :
7637 78 : if (ISUPDATE_from_mxstatus(members[i].status))
7638 : {
7639 : /* ignore aborted updaters */
7640 34 : if (TransactionIdDidAbort(memxid))
7641 2 : continue;
7642 : }
7643 : else
7644 : {
7645 : /* ignore lockers-only that are no longer in progress */
7646 44 : if (!TransactionIdIsInProgress(memxid))
7647 14 : continue;
7648 : }
7649 :
7650 : /*
7651 : * Whatever remains is either a live locker that conflicts with the
7652 : * lock we want, or an updater that has not aborted; both conflict
7653 : * with what we want. Set up to return true, but keep going to
7654 : * look for the current transaction among the multixact members,
7655 : * if needed.
7656 : */
7657 62 : result = true;
7658 : }
7659 198 : pfree(members);
7660 : }
7661 :
7662 198 : return result;
7663 : }
7664 :
7665 : /*
7666 : * Do_MultiXactIdWait
7667 : * Actual implementation for the two functions below.
7668 : *
7669 : * 'multi', 'status' and 'infomask' indicate what to sleep on (the status is
7670 : * needed to ensure we only sleep on conflicting members, and the infomask is
7671 : * used to optimize multixact access in case it's a lock-only multi); 'nowait'
7672 : * indicates whether to use conditional lock acquisition, to allow callers to
7673 : * fail if lock is unavailable. 'rel', 'ctid' and 'oper' are used to set up
7674 : * context information for error messages. 'remaining', if not NULL, receives
7675 : * the number of members that are still running, including any (non-aborted)
7676 : * subtransactions of our own transaction. 'logLockFailure' indicates whether
7677 : * to log details when a lock acquisition fails with 'nowait' enabled.
7678 : *
7679 : * We do this by sleeping on each member using XactLockTableWait. Any
7680 : * members that belong to the current backend are *not* waited for, however;
7681 : * this would not merely be useless but would lead to Assert failure inside
7682 : * XactLockTableWait. By the time this returns, it is certain that all
7683 : * transactions *of other backends* that were members of the MultiXactId
7684 : * that conflict with the requested status are dead (and no new ones can have
7685 : * been added, since it is not legal to add members to an existing
7686 : * MultiXactId).
7687 : *
7688 : * But by the time we finish sleeping, someone else may have changed the Xmax
7689 : * of the containing tuple, so the caller needs to iterate on us somehow.
7690 : *
7691 : * Note that in case we return false, the number of remaining members is
7692 : * not to be trusted.
7693 : */
7694 : static bool
7695 116 : Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
7696 : uint16 infomask, bool nowait,
7697 : Relation rel, ItemPointer ctid, XLTW_Oper oper,
7698 : int *remaining, bool logLockFailure)
7699 : {
7700 116 : bool result = true;
7701 : MultiXactMember *members;
7702 : int nmembers;
7703 116 : int remain = 0;
7704 :
7705 : /* for pre-pg_upgrade tuples, no need to sleep at all */
7706 116 : nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
7707 116 : GetMultiXactIdMembers(multi, &members, false,
7708 116 : HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7709 :
7710 116 : if (nmembers >= 0)
7711 : {
7712 : int i;
7713 :
7714 374 : for (i = 0; i < nmembers; i++)
7715 : {
7716 266 : TransactionId memxid = members[i].xid;
7717 266 : MultiXactStatus memstatus = members[i].status;
7718 :
7719 266 : if (TransactionIdIsCurrentTransactionId(memxid))
7720 : {
7721 48 : remain++;
7722 48 : continue;
7723 : }
7724 :
7725 218 : if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
7726 218 : LOCKMODE_from_mxstatus(status)))
7727 : {
7728 44 : if (remaining && TransactionIdIsInProgress(memxid))
7729 16 : remain++;
7730 44 : continue;
7731 : }
7732 :
7733 : /*
7734 : * This member conflicts with our multi, so we have to sleep (or
7735 : * return failure, if asked to avoid waiting).
7736 : *
7737 : * Note that we don't set up an error context callback ourselves,
7738 : * but instead we pass the info down to XactLockTableWait. This
7739 : * might seem a bit wasteful because the context is set up and
7740 : * torn down for each member of the multixact, but in reality it
7741 : * should be barely noticeable, and it avoids duplicate code.
7742 : */
7743 174 : if (nowait)
7744 : {
7745 8 : result = ConditionalXactLockTableWait(memxid, logLockFailure);
7746 8 : if (!result)
7747 8 : break;
7748 : }
7749 : else
7750 166 : XactLockTableWait(memxid, rel, ctid, oper);
7751 : }
7752 :
7753 116 : pfree(members);
7754 : }
7755 :
7756 116 : if (remaining)
7757 20 : *remaining = remain;
7758 :
7759 116 : return result;
7760 : }
7761 :
7762 : /*
7763 : * MultiXactIdWait
7764 : * Sleep on a MultiXactId.
7765 : *
7766 : * By the time we finish sleeping, someone else may have changed the Xmax
7767 : * of the containing tuple, so the caller needs to iterate on us somehow.
7768 : *
7769 : * We return (in *remaining, if not NULL) the number of members that are still
7770 : * running, including any (non-aborted) subtransactions of our own transaction.
7771 : */
7772 : static void
7773 108 : MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
7774 : Relation rel, ItemPointer ctid, XLTW_Oper oper,
7775 : int *remaining)
7776 : {
7777 108 : (void) Do_MultiXactIdWait(multi, status, infomask, false,
7778 : rel, ctid, oper, remaining, false);
7779 108 : }
7780 :
7781 : /*
7782 : * ConditionalMultiXactIdWait
7783 : * As above, but only lock if we can get the lock without blocking.
7784 : *
7785 : * By the time we finish sleeping, someone else may have changed the Xmax
7786 : * of the containing tuple, so the caller needs to iterate on us somehow.
7787 : *
7788 : * Returns true if the multixact is now all gone; returns false if some
7789 : * transactions might still be running.
7790 : *
7791 : * We return (in *remaining, if not NULL) the number of members that are still
7792 : * running, including any (non-aborted) subtransactions of our own transaction.
7793 : */
7794 : static bool
7795 8 : ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
7796 : uint16 infomask, Relation rel, int *remaining,
7797 : bool logLockFailure)
7798 : {
7799 8 : return Do_MultiXactIdWait(multi, status, infomask, true,
7800 : rel, NULL, XLTW_None, remaining, logLockFailure);
7801 : }
7802 :
7803 : /*
7804 : * heap_tuple_needs_eventual_freeze
7805 : *
7806 : * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
7807 : * will eventually require freezing (if tuple isn't removed by pruning first).
7808 : */
7809 : bool
7810 295358 : heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
7811 : {
7812 : TransactionId xid;
7813 :
7814 : /*
7815 : * If xmin is a normal transaction ID, this tuple is definitely not
7816 : * frozen.
7817 : */
7818 295358 : xid = HeapTupleHeaderGetXmin(tuple);
7819 295358 : if (TransactionIdIsNormal(xid))
7820 5840 : return true;
7821 :
7822 : /*
7823 : * If xmax is a valid xact or multixact, this tuple is also not frozen.
7824 : */
7825 289518 : if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7826 : {
7827 : MultiXactId multi;
7828 :
7829 0 : multi = HeapTupleHeaderGetRawXmax(tuple);
7830 0 : if (MultiXactIdIsValid(multi))
7831 0 : return true;
7832 : }
7833 : else
7834 : {
7835 289518 : xid = HeapTupleHeaderGetRawXmax(tuple);
7836 289518 : if (TransactionIdIsNormal(xid))
7837 18 : return true;
7838 : }
7839 :
7840 289500 : if (tuple->t_infomask & HEAP_MOVED)
7841 : {
7842 0 : xid = HeapTupleHeaderGetXvac(tuple);
7843 0 : if (TransactionIdIsNormal(xid))
7844 0 : return true;
7845 : }
7846 :
7847 289500 : return false;
7848 : }
7849 :
7850 : /*
7851 : * heap_tuple_should_freeze
7852 : *
7853 : * Return value indicates whether the heap_prepare_freeze_tuple sibling function would
7854 : * (or should) force freezing of the heap page that contains caller's tuple.
7855 : * Tuple header XIDs/MXIDs < FreezeLimit/MultiXactCutoff trigger freezing.
7856 : * This includes (xmin, xmax, xvac) fields, as well as MultiXact member XIDs.
7857 : *
7858 : * The *NoFreezePageRelfrozenXid and *NoFreezePageRelminMxid input/output
7859 : * arguments help VACUUM track the oldest extant XID/MXID remaining in rel.
7860 : * Our working assumption is that caller won't decide to freeze this tuple.
7861 : * It's up to caller to only ratchet back its own top-level trackers after the
7862 : * point that it fully commits to not freezing the tuple/page in question.
7863 : */
7864 : bool
7865 23267300 : heap_tuple_should_freeze(HeapTupleHeader tuple,
7866 : const struct VacuumCutoffs *cutoffs,
7867 : TransactionId *NoFreezePageRelfrozenXid,
7868 : MultiXactId *NoFreezePageRelminMxid)
7869 : {
7870 : TransactionId xid;
7871 : MultiXactId multi;
7872 23267300 : bool freeze = false;
7873 :
7874 : /* First deal with xmin */
7875 23267300 : xid = HeapTupleHeaderGetXmin(tuple);
7876 23267300 : if (TransactionIdIsNormal(xid))
7877 : {
7878 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7879 3600472 : if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7880 45886 : *NoFreezePageRelfrozenXid = xid;
7881 3600472 : if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7882 41978 : freeze = true;
7883 : }
7884 :
7885 : /* Now deal with xmax */
7886 23267300 : xid = InvalidTransactionId;
7887 23267300 : multi = InvalidMultiXactId;
7888 23267300 : if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7889 4 : multi = HeapTupleHeaderGetRawXmax(tuple);
7890 : else
7891 23267296 : xid = HeapTupleHeaderGetRawXmax(tuple);
7892 :
7893 23267300 : if (TransactionIdIsNormal(xid))
7894 : {
7895 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7896 : /* xmax is a non-permanent XID */
7897 20231740 : if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7898 8 : *NoFreezePageRelfrozenXid = xid;
7899 20231740 : if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7900 42 : freeze = true;
7901 : }
7902 3035560 : else if (!MultiXactIdIsValid(multi))
7903 : {
7904 : /* xmax is a permanent XID or invalid MultiXactId/XID */
7905 : }
7906 4 : else if (HEAP_LOCKED_UPGRADED(tuple->t_infomask))
7907 : {
7908 : /* xmax is a pg_upgrade'd MultiXact, which can't have updater XID */
7909 0 : if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
7910 0 : *NoFreezePageRelminMxid = multi;
7911 : /* heap_prepare_freeze_tuple always freezes pg_upgrade'd xmax */
7912 0 : freeze = true;
7913 : }
7914 : else
7915 : {
7916 : /* xmax is a MultiXactId that may have an updater XID */
7917 : MultiXactMember *members;
7918 : int nmembers;
7919 :
7920 : Assert(MultiXactIdPrecedesOrEquals(cutoffs->relminmxid, multi));
7921 4 : if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
7922 4 : *NoFreezePageRelminMxid = multi;
7923 4 : if (MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff))
7924 4 : freeze = true;
7925 :
7926 : /* need to check whether any member of the mxact is old */
7927 4 : nmembers = GetMultiXactIdMembers(multi, &members, false,
7928 4 : HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
7929 :
7930 10 : for (int i = 0; i < nmembers; i++)
7931 : {
7932 6 : xid = members[i].xid;
7933 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7934 6 : if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7935 0 : *NoFreezePageRelfrozenXid = xid;
7936 6 : if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7937 0 : freeze = true;
7938 : }
7939 4 : if (nmembers > 0)
7940 2 : pfree(members);
7941 : }
7942 :
7943 23267300 : if (tuple->t_infomask & HEAP_MOVED)
7944 : {
7945 0 : xid = HeapTupleHeaderGetXvac(tuple);
7946 0 : if (TransactionIdIsNormal(xid))
7947 : {
7948 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7949 0 : if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7950 0 : *NoFreezePageRelfrozenXid = xid;
7951 : /* heap_prepare_freeze_tuple forces xvac freezing */
7952 0 : freeze = true;
7953 : }
7954 : }
7955 :
7956 23267300 : return freeze;
7957 : }
7958 :
7959 : /*
7960 : * Maintain snapshotConflictHorizon for caller by ratcheting forward its value
7961 : * using any committed XIDs contained in 'tuple', an obsolescent heap tuple
7962 : * that caller is in the process of physically removing, e.g. via HOT pruning
7963 : * or index deletion.
7964 : *
7965 : * Caller must initialize its value to InvalidTransactionId, which is
7966 : * generally interpreted as "definitely no need for a recovery conflict".
7967 : * Final value must reflect all heap tuples that caller will physically remove
7968 : * (or remove TID references to) via its ongoing pruning/deletion operation.
7969 : * ResolveRecoveryConflictWithSnapshot() is passed the final value (taken from
7970 : * caller's WAL record) by REDO routine when it replays caller's operation.
7971 : */
7972 : void
7973 3090164 : HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple,
7974 : TransactionId *snapshotConflictHorizon)
7975 : {
7976 3090164 : TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
7977 3090164 : TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
7978 3090164 : TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
7979 :
7980 3090164 : if (tuple->t_infomask & HEAP_MOVED)
7981 : {
7982 0 : if (TransactionIdPrecedes(*snapshotConflictHorizon, xvac))
7983 0 : *snapshotConflictHorizon = xvac;
7984 : }
7985 :
7986 : /*
7987 : * Ignore tuples inserted by an aborted transaction or if the tuple was
7988 : * updated/deleted by the inserting transaction.
7989 : *
7990 : * Look for a committed hint bit, or if no xmin bit is set, check clog.
7991 : */
7992 3090164 : if (HeapTupleHeaderXminCommitted(tuple) ||
7993 208224 : (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
7994 : {
7995 5560140 : if (xmax != xmin &&
7996 2624602 : TransactionIdFollows(xmax, *snapshotConflictHorizon))
7997 196256 : *snapshotConflictHorizon = xmax;
7998 : }
7999 3090164 : }
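/*
 * A minimal caller-side sketch (hypothetical names), following the contract
 * above.  The final value ends up in the caller's pruning/deletion WAL
 * record, from where REDO passes it to ResolveRecoveryConflictWithSnapshot():
 *
 *		TransactionId conflict_horizon = InvalidTransactionId;
 *
 *		for (int i = 0; i < nvictims; i++)
 *			HeapTupleHeaderAdvanceConflictHorizon(victims[i],
 *												  &conflict_horizon);
 */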
8000 :
8001 : #ifdef USE_PREFETCH
8002 : /*
8003 : * Helper function for heap_index_delete_tuples. Issues prefetch requests for
8004 : * prefetch_count buffers. The prefetch_state keeps track of all the buffers
8005 : * we can prefetch, and which have already been prefetched; each call to this
8006 : * function picks up where the previous call left off.
8007 : *
8008 : * Note: we expect the deltids array to be sorted in an order that groups TIDs
8009 : * by heap block, with all TIDs for each block appearing together in exactly
8010 : * one group.
8011 : */
8012 : static void
8013 37130 : index_delete_prefetch_buffer(Relation rel,
8014 : IndexDeletePrefetchState *prefetch_state,
8015 : int prefetch_count)
8016 : {
8017 37130 : BlockNumber cur_hblkno = prefetch_state->cur_hblkno;
8018 37130 : int count = 0;
8019 : int i;
8020 37130 : int ndeltids = prefetch_state->ndeltids;
8021 37130 : TM_IndexDelete *deltids = prefetch_state->deltids;
8022 :
8023 37130 : for (i = prefetch_state->next_item;
8024 1285056 : i < ndeltids && count < prefetch_count;
8025 1247926 : i++)
8026 : {
8027 1247926 : ItemPointer htid = &deltids[i].tid;
8028 :
8029 2484676 : if (cur_hblkno == InvalidBlockNumber ||
8030 1236750 : ItemPointerGetBlockNumber(htid) != cur_hblkno)
8031 : {
8032 33576 : cur_hblkno = ItemPointerGetBlockNumber(htid);
8033 33576 : PrefetchBuffer(rel, MAIN_FORKNUM, cur_hblkno);
8034 33576 : count++;
8035 : }
8036 : }
8037 :
8038 : /*
8039 : * Save the prefetch position so that next time we can continue from that
8040 : * position.
8041 : */
8042 37130 : prefetch_state->next_item = i;
8043 37130 : prefetch_state->cur_hblkno = cur_hblkno;
8044 37130 : }
8045 : #endif
8046 :
8047 : /*
8048 : * Helper function for heap_index_delete_tuples. Checks for index corruption
8049 : * involving an invalid TID in index AM caller's index page.
8050 : *
8051 : * This is an ideal place for these checks. The index AM must hold a buffer
8052 : * lock on the index page containing the TIDs we examine here, so we don't
8053 : * have to worry about concurrent VACUUMs at all. We can be sure that the
8054 : * index is corrupt when htid points directly to an LP_UNUSED item or
8055 : * heap-only tuple, which is not the case during standard index scans.
8056 : */
8057 : static inline void
8058 1044902 : index_delete_check_htid(TM_IndexDeleteOp *delstate,
8059 : Page page, OffsetNumber maxoff,
8060 : ItemPointer htid, TM_IndexStatus *istatus)
8061 : {
8062 1044902 : OffsetNumber indexpagehoffnum = ItemPointerGetOffsetNumber(htid);
8063 : ItemId iid;
8064 :
8065 : Assert(OffsetNumberIsValid(istatus->idxoffnum));
8066 :
8067 1044902 : if (unlikely(indexpagehoffnum > maxoff))
8068 0 : ereport(ERROR,
8069 : (errcode(ERRCODE_INDEX_CORRUPTED),
8070 : errmsg_internal("heap tid from index tuple (%u,%u) points past end of heap page line pointer array at offset %u of block %u in index \"%s\"",
8071 : ItemPointerGetBlockNumber(htid),
8072 : indexpagehoffnum,
8073 : istatus->idxoffnum, delstate->iblknum,
8074 : RelationGetRelationName(delstate->irel))));
8075 :
8076 1044902 : iid = PageGetItemId(page, indexpagehoffnum);
8077 1044902 : if (unlikely(!ItemIdIsUsed(iid)))
8078 0 : ereport(ERROR,
8079 : (errcode(ERRCODE_INDEX_CORRUPTED),
8080 : errmsg_internal("heap tid from index tuple (%u,%u) points to unused heap page item at offset %u of block %u in index \"%s\"",
8081 : ItemPointerGetBlockNumber(htid),
8082 : indexpagehoffnum,
8083 : istatus->idxoffnum, delstate->iblknum,
8084 : RelationGetRelationName(delstate->irel))));
8085 :
8086 1044902 : if (ItemIdHasStorage(iid))
8087 : {
8088 : HeapTupleHeader htup;
8089 :
8090 : Assert(ItemIdIsNormal(iid));
8091 620222 : htup = (HeapTupleHeader) PageGetItem(page, iid);
8092 :
8093 620222 : if (unlikely(HeapTupleHeaderIsHeapOnly(htup)))
8094 0 : ereport(ERROR,
8095 : (errcode(ERRCODE_INDEX_CORRUPTED),
8096 : errmsg_internal("heap tid from index tuple (%u,%u) points to heap-only tuple at offset %u of block %u in index \"%s\"",
8097 : ItemPointerGetBlockNumber(htid),
8098 : indexpagehoffnum,
8099 : istatus->idxoffnum, delstate->iblknum,
8100 : RelationGetRelationName(delstate->irel))));
8101 : }
8102 1044902 : }
8103 :
8104 : /*
8105 : * heapam implementation of tableam's index_delete_tuples interface.
8106 : *
8107 : * This helper function is called by index AMs during index tuple deletion.
8108 : * See tableam header comments for an explanation of the interface implemented
8109 : * here and a general theory of operation. Note that each call here is either
8110 : * a simple index deletion call, or a bottom-up index deletion call.
8111 : *
8112 : * It's possible for this to generate a fair amount of I/O, since we may be
8113 : * deleting hundreds of tuples from a single index block. To amortize that
8114 : * cost to some degree, this uses prefetching and combines repeat accesses to
8115 : * the same heap block.
8116 : */
8117 : TransactionId
8118 11176 : heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
8119 : {
8120 : /* Initial assumption is that earlier pruning took care of conflict */
8121 11176 : TransactionId snapshotConflictHorizon = InvalidTransactionId;
8122 11176 : BlockNumber blkno = InvalidBlockNumber;
8123 11176 : Buffer buf = InvalidBuffer;
8124 11176 : Page page = NULL;
8125 11176 : OffsetNumber maxoff = InvalidOffsetNumber;
8126 : TransactionId priorXmax;
8127 : #ifdef USE_PREFETCH
8128 : IndexDeletePrefetchState prefetch_state;
8129 : int prefetch_distance;
8130 : #endif
8131 : SnapshotData SnapshotNonVacuumable;
8132 11176 : int finalndeltids = 0,
8133 11176 : nblocksaccessed = 0;
8134 :
8135 : /* State that's only used in bottom-up index deletion case */
8136 11176 : int nblocksfavorable = 0;
8137 11176 : int curtargetfreespace = delstate->bottomupfreespace,
8138 11176 : lastfreespace = 0,
8139 11176 : actualfreespace = 0;
8140 11176 : bool bottomup_final_block = false;
8141 :
8142 11176 : InitNonVacuumableSnapshot(SnapshotNonVacuumable, GlobalVisTestFor(rel));
8143 :
8144 : /* Sort caller's deltids array by TID for further processing */
8145 11176 : index_delete_sort(delstate);
8146 :
8147 : /*
8148 : * Bottom-up case: resort deltids array in an order attuned to where the
8149 : * greatest number of promising TIDs are to be found, and determine how
8150 : * many blocks from the start of sorted array should be considered
8151 : * favorable. This will also shrink the deltids array in order to
8152 : * eliminate completely unfavorable blocks up front.
8153 : */
8154 11176 : if (delstate->bottomup)
8155 3890 : nblocksfavorable = bottomup_sort_and_shrink(delstate);
8156 :
8157 : #ifdef USE_PREFETCH
8158 : /* Initialize prefetch state. */
8159 11176 : prefetch_state.cur_hblkno = InvalidBlockNumber;
8160 11176 : prefetch_state.next_item = 0;
8161 11176 : prefetch_state.ndeltids = delstate->ndeltids;
8162 11176 : prefetch_state.deltids = delstate->deltids;
8163 :
8164 : /*
8165 : * Determine the prefetch distance that we will attempt to maintain.
8166 : *
8167 : * Since the caller holds a buffer lock somewhere in rel, we'd better make
8168 : * sure that isn't a catalog relation before we call code that does
8169 : * syscache lookups, to avoid risk of deadlock.
8170 : */
8171 11176 : if (IsCatalogRelation(rel))
8172 7990 : prefetch_distance = maintenance_io_concurrency;
8173 : else
8174 : prefetch_distance =
8175 3186 : get_tablespace_maintenance_io_concurrency(rel->rd_rel->reltablespace);
8176 :
8177 : /* Cap initial prefetch distance for bottom-up deletion caller */
8178 11176 : if (delstate->bottomup)
8179 : {
8180 : Assert(nblocksfavorable >= 1);
8181 : Assert(nblocksfavorable <= BOTTOMUP_MAX_NBLOCKS);
8182 3890 : prefetch_distance = Min(prefetch_distance, nblocksfavorable);
8183 : }
8184 :
8185 : /* Start prefetching. */
8186 11176 : index_delete_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
8187 : #endif
8188 :
8189 : /* Iterate over deltids, determine which to delete, check their horizon */
8190 : Assert(delstate->ndeltids > 0);
8191 1056078 : for (int i = 0; i < delstate->ndeltids; i++)
8192 : {
8193 1048792 : TM_IndexDelete *ideltid = &delstate->deltids[i];
8194 1048792 : TM_IndexStatus *istatus = delstate->status + ideltid->id;
8195 1048792 : ItemPointer htid = &ideltid->tid;
8196 : OffsetNumber offnum;
8197 :
8198 : /*
8199 : * Read buffer, and perform required extra steps each time a new block
8200 : * is encountered. Avoid refetching if it's the same block as the one
8201 : * from the last htid.
8202 : */
8203 2086408 : if (blkno == InvalidBlockNumber ||
8204 1037616 : ItemPointerGetBlockNumber(htid) != blkno)
8205 : {
8206 : /*
8207 : * Consider giving up early for bottom-up index deletion caller
8208 : * first. (Only prefetch next-next block afterwards, when it
8209 : * becomes clear that we're at least going to access the next
8210 : * block in line.)
8211 : *
8212 : * Sometimes the first block frees so much space for bottom-up
8213 : * caller that the deletion process can end without accessing any
8214 : * more blocks. It is usually necessary to access 2 or 3 blocks
8215 : * per bottom-up deletion operation, though.
8216 : */
8217 29844 : if (delstate->bottomup)
8218 : {
8219 : /*
8220 : * We often allow caller to delete a few additional items
8221 : * whose entries we reached after the point that space target
8222 : * from caller was satisfied. The cost of accessing the page
8223 : * was already paid at that point, so it made sense to finish
8224 : * it off. When that happened, we finalize everything here
8225 : * (by finishing off the whole bottom-up deletion operation
8226 : * without needlessly paying the cost of accessing any more
8227 : * blocks).
8228 : */
8229 8282 : if (bottomup_final_block)
8230 292 : break;
8231 :
8232 : /*
8233 : * Give up when we didn't enable our caller to free any
8234 : * additional space as a result of processing the page that we
8235 : * just finished up with. This rule is the main way in which
8236 : * we keep the cost of bottom-up deletion under control.
8237 : */
8238 7990 : if (nblocksaccessed >= 1 && actualfreespace == lastfreespace)
8239 3598 : break;
8240 4392 : lastfreespace = actualfreespace; /* for next time */
8241 :
8242 : /*
8243 : * Deletion operation (which is bottom-up) will definitely
8244 : * access the next block in line. Prepare for that now.
8245 : *
8246 : * Decay target free space so that we don't hang on for too
8247 : * long with a marginal case. (Space target is only truly
8248 : * helpful when it allows us to recognize that we don't need
8249 : * to access more than 1 or 2 blocks to satisfy caller due to
8250 : * agreeable workload characteristics.)
8251 : *
8252 : * We are a bit more patient when we encounter contiguous
8253 : * blocks, though: these are treated as favorable blocks. The
8254 : * decay process is only applied when the next block in line
8255 : * is not a favorable/contiguous block. This is not an
8256 : * exception to the general rule; we still insist on finding
8257 : * at least one deletable item per block accessed. See
8258 : * bottomup_nblocksfavorable() for full details of the theory
8259 : * behind favorable blocks and heap block locality in general.
8260 : *
8261 : * Note: The first block in line is always treated as a
8262 : * favorable block, so the earliest possible point that the
8263 : * decay can be applied is just before we access the second
8264 : * block in line. The Assert() verifies this for us.
8265 : */
8266 : Assert(nblocksaccessed > 0 || nblocksfavorable > 0);
8267 4392 : if (nblocksfavorable > 0)
8268 4216 : nblocksfavorable--;
8269 : else
8270 176 : curtargetfreespace /= 2;
8271 : }
8272 :
8273 : /* release old buffer */
8274 25954 : if (BufferIsValid(buf))
8275 14778 : UnlockReleaseBuffer(buf);
8276 :
8277 25954 : blkno = ItemPointerGetBlockNumber(htid);
8278 25954 : buf = ReadBuffer(rel, blkno);
8279 25954 : nblocksaccessed++;
8280 : Assert(!delstate->bottomup ||
8281 : nblocksaccessed <= BOTTOMUP_MAX_NBLOCKS);
8282 :
8283 : #ifdef USE_PREFETCH
8284 :
8285 : /*
8286 : * To maintain the prefetch distance, prefetch one more page for
8287 : * each page we read.
8288 : */
8289 25954 : index_delete_prefetch_buffer(rel, &prefetch_state, 1);
8290 : #endif
8291 :
8292 25954 : LockBuffer(buf, BUFFER_LOCK_SHARE);
8293 :
8294 25954 : page = BufferGetPage(buf);
8295 25954 : maxoff = PageGetMaxOffsetNumber(page);
8296 : }
8297 :
8298 : /*
8299 : * In passing, detect index corruption involving an index page with a
8300 : * TID that points to a location in the heap that couldn't possibly be
8301 : * correct. We only do this with actual TIDs from caller's index page
8302 : * (not items reached by traversing through a HOT chain).
8303 : */
8304 1044902 : index_delete_check_htid(delstate, page, maxoff, htid, istatus);
8305 :
8306 1044902 : if (istatus->knowndeletable)
8307 : Assert(!delstate->bottomup && !istatus->promising);
8308 : else
8309 : {
8310 782314 : ItemPointerData tmp = *htid;
8311 : HeapTupleData heapTuple;
8312 :
8313 : /* Are any tuples from this HOT chain non-vacuumable? */
8314 782314 : if (heap_hot_search_buffer(&tmp, rel, buf, &SnapshotNonVacuumable,
8315 : &heapTuple, NULL, true))
8316 468852 : continue; /* can't delete entry */
8317 :
8318 : /* Caller will delete, since whole HOT chain is vacuumable */
8319 313462 : istatus->knowndeletable = true;
8320 :
8321 : /* Maintain index free space info for bottom-up deletion case */
8322 313462 : if (delstate->bottomup)
8323 : {
8324 : Assert(istatus->freespace > 0);
8325 13828 : actualfreespace += istatus->freespace;
8326 13828 : if (actualfreespace >= curtargetfreespace)
8327 4354 : bottomup_final_block = true;
8328 : }
8329 : }
8330 :
8331 : /*
8332 : * Maintain snapshotConflictHorizon value for deletion operation as a
8333 : * whole by advancing current value using heap tuple headers. This is
8334 : * loosely based on the logic for pruning a HOT chain.
8335 : */
8336 576050 : offnum = ItemPointerGetOffsetNumber(htid);
8337 576050 : priorXmax = InvalidTransactionId; /* cannot check first XMIN */
8338 : for (;;)
8339 41178 : {
8340 : ItemId lp;
8341 : HeapTupleHeader htup;
8342 :
8343 : /* Sanity check (pure paranoia) */
8344 617228 : if (offnum < FirstOffsetNumber)
8345 0 : break;
8346 :
8347 : /*
8348 : * An offset past the end of page's line pointer array is possible
8349 : * when the array was truncated
8350 : */
8351 617228 : if (offnum > maxoff)
8352 0 : break;
8353 :
8354 617228 : lp = PageGetItemId(page, offnum);
8355 617228 : if (ItemIdIsRedirected(lp))
8356 : {
8357 18804 : offnum = ItemIdGetRedirect(lp);
8358 18804 : continue;
8359 : }
8360 :
8361 : /*
8362 : * We'll often encounter LP_DEAD line pointers (especially with an
8363 : * entry marked knowndeletable by our caller up front). No heap
8364 : * tuple headers get examined for an htid that leads us to an
8365 : * LP_DEAD item. This is okay because the earlier pruning
8366 : * operation that made the line pointer LP_DEAD in the first place
8367 : * must have considered the original tuple header as part of
8368 : * generating its own snapshotConflictHorizon value.
8369 : *
8370 : * Relying on XLOG_HEAP2_PRUNE_VACUUM_SCAN records like this is
8371 : * the same strategy that index vacuuming uses in all cases. Index
8372 : * VACUUM WAL records don't even have a snapshotConflictHorizon
8373 : * field of their own for this reason.
8374 : */
8375 598424 : if (!ItemIdIsNormal(lp))
8376 379710 : break;
8377 :
8378 218714 : htup = (HeapTupleHeader) PageGetItem(page, lp);
8379 :
8380 : /*
8381 : * Check the tuple XMIN against prior XMAX, if any
8382 : */
8383 241088 : if (TransactionIdIsValid(priorXmax) &&
8384 22374 : !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
8385 0 : break;
8386 :
8387 218714 : HeapTupleHeaderAdvanceConflictHorizon(htup,
8388 : &snapshotConflictHorizon);
8389 :
8390 : /*
8391 : * If the tuple is not HOT-updated, then we are at the end of this
8392 : * HOT-chain. No need to visit later tuples from the same update
8393 : * chain (they get their own index entries) -- just move on to
8394 : * next htid from index AM caller.
8395 : */
8396 218714 : if (!HeapTupleHeaderIsHotUpdated(htup))
8397 196340 : break;
8398 :
8399 : /* Advance to next HOT chain member */
8400 : Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blkno);
8401 22374 : offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
8402 22374 : priorXmax = HeapTupleHeaderGetUpdateXid(htup);
8403 : }
8404 :
8405 : /* Enable further/final shrinking of deltids for caller */
8406 576050 : finalndeltids = i + 1;
8407 : }
8408 :
8409 11176 : UnlockReleaseBuffer(buf);
8410 :
8411 : /*
8412 : * Shrink deltids array to exclude non-deletable entries at the end. This
8413 : * is not just a minor optimization. Final deltids array size might be
8414 : * zero for a bottom-up caller. Index AM is explicitly allowed to rely on
8415 : * ndeltids being zero in all cases with zero total deletable entries.
8416 : */
8417 : Assert(finalndeltids > 0 || delstate->bottomup);
8418 11176 : delstate->ndeltids = finalndeltids;
8419 :
8420 11176 : return snapshotConflictHorizon;
8421 : }
8422 :
8423 : /*
8424 : * Specialized inlineable comparison function for index_delete_sort()
8425 : */
8426 : static inline int
8427 24718074 : index_delete_sort_cmp(TM_IndexDelete *deltid1, TM_IndexDelete *deltid2)
8428 : {
8429 24718074 : ItemPointer tid1 = &deltid1->tid;
8430 24718074 : ItemPointer tid2 = &deltid2->tid;
8431 :
8432 : {
8433 24718074 : BlockNumber blk1 = ItemPointerGetBlockNumber(tid1);
8434 24718074 : BlockNumber blk2 = ItemPointerGetBlockNumber(tid2);
8435 :
8436 24718074 : if (blk1 != blk2)
8437 10117330 : return (blk1 < blk2) ? -1 : 1;
8438 : }
8439 : {
8440 14600744 : OffsetNumber pos1 = ItemPointerGetOffsetNumber(tid1);
8441 14600744 : OffsetNumber pos2 = ItemPointerGetOffsetNumber(tid2);
8442 :
8443 14600744 : if (pos1 != pos2)
8444 14600744 : return (pos1 < pos2) ? -1 : 1;
8445 : }
8446 :
8447 : Assert(false);
8448 :
8449 0 : return 0;
8450 : }
8451 :
8452 : /*
8453 : * Sort deltids array from delstate by TID. This prepares it for further
8454 : * processing by heap_index_delete_tuples().
8455 : *
8456 : * This operation becomes a noticeable consumer of CPU cycles with some
8457 : * workloads, so we go to the trouble of specialization/micro optimization.
8458 : * We use shellsort for this because it's easy to specialize, compiles to
8459 : * relatively few instructions, and is adaptive to presorted inputs/subsets
8460 : * (which are typical here).
8461 : */
8462 : static void
8463 11176 : index_delete_sort(TM_IndexDeleteOp *delstate)
8464 : {
8465 11176 : TM_IndexDelete *deltids = delstate->deltids;
8466 11176 : int ndeltids = delstate->ndeltids;
8467 :
8468 : /*
8469 : * Shellsort gap sequence (taken from Sedgewick-Incerpi paper).
8470 : *
8471 : * This implementation is fast with array sizes up to ~4500. This covers
8472 : * all supported BLCKSZ values.
8473 : */
8474 11176 : const int gaps[9] = {1968, 861, 336, 112, 48, 21, 7, 3, 1};
8475 :
8476 : /* Think carefully before changing anything here -- keep swaps cheap */
8477 : StaticAssertDecl(sizeof(TM_IndexDelete) <= 8,
8478 : "element size exceeds 8 bytes");
8479 :
8480 111760 : for (int g = 0; g < lengthof(gaps); g++)
8481 : {
8482 14763506 : for (int hi = gaps[g], i = hi; i < ndeltids; i++)
8483 : {
8484 14662922 : TM_IndexDelete d = deltids[i];
8485 14662922 : int j = i;
8486 :
8487 25430330 : while (j >= hi && index_delete_sort_cmp(&deltids[j - hi], &d) >= 0)
8488 : {
8489 10767408 : deltids[j] = deltids[j - hi];
8490 10767408 : j -= hi;
8491 : }
8492 14662922 : deltids[j] = d;
8493 : }
8494 : }
8495 11176 : }
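/*
 * The same gapped-insertion-sort shape, shown on a plain int array as a
 * self-contained sketch (illustrative only; the real sort above operates on
 * TM_IndexDelete elements and compares them with index_delete_sort_cmp):
 */
static void
shellsort_ints_example(int *a, int n)
{
	static const int gaps[] = {1968, 861, 336, 112, 48, 21, 7, 3, 1};

	for (int g = 0; g < (int) (sizeof(gaps) / sizeof(gaps[0])); g++)
	{
		for (int hi = gaps[g], i = hi; i < n; i++)
		{
			int			d = a[i];
			int			j = i;

			/* shift larger elements up by 'hi' until d's slot is found */
			while (j >= hi && a[j - hi] > d)
			{
				a[j] = a[j - hi];
				j -= hi;
			}
			a[j] = d;
		}
	}
}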
8496 :
8497 : /*
8498 : * Returns how many blocks should be considered favorable/contiguous for a
8499 : * bottom-up index deletion pass. This is a number of heap blocks that starts
8500 : * from and includes the first block in line.
8501 : *
8502 : * There is always at least one favorable block during bottom-up index
8503 : * deletion. In the worst case (i.e. with totally random heap blocks) the
8504 : * first block in line (the only favorable block) can be thought of as a
8505 : * degenerate array of contiguous blocks that consists of a single block.
8506 : * heap_index_delete_tuples() will expect this.
8507 : *
8508 : * Caller passes blockgroups, a description of the final order that deltids
8509 : * will be sorted in for heap_index_delete_tuples() bottom-up index deletion
8510 : * processing. Note that deltids need not actually be sorted just yet (caller
8511 : * only passes deltids to us so that we can interpret blockgroups).
8512 : *
8513 : * You might guess that the existence of contiguous blocks cannot matter much,
8514 : * since in general the main factor that determines which blocks we visit is
8515 : * the number of promising TIDs, which is a fixed hint from the index AM.
8516 : * We're not really targeting the general case, though -- the actual goal is
8517 : * to adapt our behavior to a wide variety of naturally occurring conditions.
8518 : * The effects of most of the heuristics we apply are only noticeable in the
8519 : * aggregate, over time and across many _related_ bottom-up index deletion
8520 : * passes.
8521 : *
8522 : * Deeming certain blocks favorable allows heapam to recognize and adapt to
8523 : * workloads where heap blocks visited during bottom-up index deletion can be
8524 : * accessed contiguously, in the sense that each newly visited block is the
8525 : * neighbor of the block that bottom-up deletion just finished processing (or
8526 : * close enough to it). It will likely be cheaper to access more favorable
8527 : * blocks sooner rather than later (e.g. in this pass, not across a series of
8528 : * related bottom-up passes). Either way it is probably only a matter of time
8529 : * (or a matter of further correlated version churn) before all blocks that
8530 : * appear together as a single large batch of favorable blocks get accessed by
8531 : * _some_ bottom-up pass. Large batches of favorable blocks tend to either
8532 : * appear almost constantly or not even once (it all depends on per-index
8533 : * workload characteristics).
8534 : *
8535 : * Note that the blockgroups sort order applies a power-of-two bucketing
8536 : * scheme that creates opportunities for contiguous groups of blocks to get
8537 : * batched together, at least with workloads that are naturally amenable to
8538 : * being driven by heap block locality. This doesn't just enhance the spatial
8539 : * locality of bottom-up heap block processing in the obvious way. It also
8540 : * enables temporal locality of access, since sorting by heap block number
8541 : * naturally tends to make the bottom-up processing order deterministic.
8542 : *
8543 : * Consider the following example to get a sense of how temporal locality
8544 : * might matter: There is a heap relation with several indexes, each of which
8545 : * is low to medium cardinality. It is subject to constant non-HOT updates.
8546 : * The updates are skewed (in one part of the primary key, perhaps). None of
8547 : * the indexes are logically modified by the UPDATE statements (if they were
8548 : * then bottom-up index deletion would not be triggered in the first place).
8549 : * Naturally, each new round of index tuples (for each heap tuple that gets a
8550 : * heap_update() call) will have the same heap TID in each and every index.
8551 : * Since these indexes are low cardinality and never get logically modified,
8552 : * heapam processing during bottom-up deletion passes will access heap blocks
8553 : * in approximately sequential order. Temporal locality of access occurs due
8554 : * to bottom-up deletion passes behaving very similarly across each of the
8555 : * indexes at any given moment. This keeps the number of buffer misses needed
8556 : * to visit heap blocks to a minimum.
8557 : */
8558 : static int
8559 3890 : bottomup_nblocksfavorable(IndexDeleteCounts *blockgroups, int nblockgroups,
8560 : TM_IndexDelete *deltids)
8561 : {
8562 3890 : int64 lastblock = -1;
8563 3890 : int nblocksfavorable = 0;
8564 :
8565 : Assert(nblockgroups >= 1);
8566 : Assert(nblockgroups <= BOTTOMUP_MAX_NBLOCKS);
8567 :
8568 : /*
8569 : * We tolerate heap blocks that will be accessed only slightly out of
8570 : * physical order. Small blips occur when a pair of almost-contiguous
8571 : * blocks happen to fall into different buckets (perhaps due only to a
8572 : * small difference in npromisingtids that the bucketing scheme didn't
8573 : * quite manage to ignore). We effectively ignore these blips by applying
8574 : * a small tolerance. The precise tolerance we use is a little arbitrary,
8575 : * but it works well enough in practice.
8576 : */
8577 12198 : for (int b = 0; b < nblockgroups; b++)
8578 : {
8579 11690 : IndexDeleteCounts *group = blockgroups + b;
8580 11690 : TM_IndexDelete *firstdtid = deltids + group->ifirsttid;
8581 11690 : BlockNumber block = ItemPointerGetBlockNumber(&firstdtid->tid);
8582 :
8583 11690 : if (lastblock != -1 &&
8584 7800 : ((int64) block < lastblock - BOTTOMUP_TOLERANCE_NBLOCKS ||
8585 6662 : (int64) block > lastblock + BOTTOMUP_TOLERANCE_NBLOCKS))
8586 : break;
8587 :
8588 8308 : nblocksfavorable++;
8589 8308 : lastblock = block;
8590 : }
8591 :
8592 : /* Always indicate that there is at least 1 favorable block */
8593 : Assert(nblocksfavorable >= 1);
8594 :
8595 3890 : return nblocksfavorable;
8596 : }
8597 :
8598 : /*
8599 : * qsort comparison function for bottomup_sort_and_shrink()
8600 : */
8601 : static int
8602 403498 : bottomup_sort_and_shrink_cmp(const void *arg1, const void *arg2)
8603 : {
8604 403498 : const IndexDeleteCounts *group1 = (const IndexDeleteCounts *) arg1;
8605 403498 : const IndexDeleteCounts *group2 = (const IndexDeleteCounts *) arg2;
8606 :
8607 : /*
8608 : * Most significant field is npromisingtids (which we invert the order of
8609 : * so as to sort in desc order).
8610 : *
8611 : * Caller should have already normalized npromisingtids fields into
8612 : * power-of-two values (buckets).
8613 : */
8614 403498 : if (group1->npromisingtids > group2->npromisingtids)
8615 19402 : return -1;
8616 384096 : if (group1->npromisingtids < group2->npromisingtids)
8617 21708 : return 1;
8618 :
8619 : /*
8620 : * Tiebreak: desc ntids sort order.
8621 : *
8622 : * We cannot expect power-of-two values for ntids fields. We should
8623 : * behave as if they were already rounded up for us instead.
8624 : */
8625 362388 : if (group1->ntids != group2->ntids)
8626 : {
8627 250708 : uint32 ntids1 = pg_nextpower2_32((uint32) group1->ntids);
8628 250708 : uint32 ntids2 = pg_nextpower2_32((uint32) group2->ntids);
8629 :
8630 250708 : if (ntids1 > ntids2)
8631 37356 : return -1;
8632 213352 : if (ntids1 < ntids2)
8633 52064 : return 1;
8634 : }
8635 :
8636 : /*
8637 : * Tiebreak: asc offset-into-deltids-for-block (offset to first TID for
8638 : * block in deltids array) order.
8639 : *
8640 : * This is equivalent to sorting in ascending heap block number order
8641 : * (among otherwise equal subsets of the array). This approach allows us
8642 : * to avoid accessing the out-of-line TID. (We rely on the assumption
8643 : * that the deltids array was sorted in ascending heap TID order when
8644 : * these offsets to the first TID from each heap block group were formed.)
8645 : */
8646 272968 : if (group1->ifirsttid > group2->ifirsttid)
8647 136276 : return 1;
8648 136692 : if (group1->ifirsttid < group2->ifirsttid)
8649 136692 : return -1;
8650 :
8651 0 : pg_unreachable();
8652 :
8653 : return 0;
8654 : }
8655 :
8656 : /*
8657 : * heap_index_delete_tuples() helper function for bottom-up deletion callers.
8658 : *
8659 : * Sorts deltids array in the order needed for useful processing by bottom-up
8660 : * deletion. The array should already be sorted in TID order when we're
8661 : * called. The sort process groups heap TIDs from deltids into heap block
8662 : * groupings. Earlier/more-promising groups/blocks are usually those that are
8663 : * known to have the most "promising" TIDs.
8664 : *
8665 : * Sets new size of deltids array (ndeltids) in state. deltids will only have
8666 : * TIDs from the BOTTOMUP_MAX_NBLOCKS most promising heap blocks when we
8667 : * return. This often means that deltids will be shrunk to a small fraction
8668 : * of its original size (we eliminate many heap blocks from consideration for
8669 : * caller up front).
8670 : *
8671 : * Returns the number of "favorable" blocks. See bottomup_nblocksfavorable()
8672 : * for a definition and full details.
8673 : */
8674 : static int
8675 3890 : bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
8676 : {
8677 : IndexDeleteCounts *blockgroups;
8678 : TM_IndexDelete *reordereddeltids;
8679 3890 : BlockNumber curblock = InvalidBlockNumber;
8680 3890 : int nblockgroups = 0;
8681 3890 : int ncopied = 0;
8682 3890 : int nblocksfavorable = 0;
8683 :
8684 : Assert(delstate->bottomup);
8685 : Assert(delstate->ndeltids > 0);
8686 :
8687 : /* Calculate per-heap-block count of TIDs */
8688 3890 : blockgroups = palloc(sizeof(IndexDeleteCounts) * delstate->ndeltids);
8689 1833682 : for (int i = 0; i < delstate->ndeltids; i++)
8690 : {
8691 1829792 : TM_IndexDelete *ideltid = &delstate->deltids[i];
8692 1829792 : TM_IndexStatus *istatus = delstate->status + ideltid->id;
8693 1829792 : ItemPointer htid = &ideltid->tid;
8694 1829792 : bool promising = istatus->promising;
8695 :
8696 1829792 : if (curblock != ItemPointerGetBlockNumber(htid))
8697 : {
8698 : /* New block group */
8699 78152 : nblockgroups++;
8700 :
8701 : Assert(curblock < ItemPointerGetBlockNumber(htid) ||
8702 : !BlockNumberIsValid(curblock));
8703 :
8704 78152 : curblock = ItemPointerGetBlockNumber(htid);
8705 78152 : blockgroups[nblockgroups - 1].ifirsttid = i;
8706 78152 : blockgroups[nblockgroups - 1].ntids = 1;
8707 78152 : blockgroups[nblockgroups - 1].npromisingtids = 0;
8708 : }
8709 : else
8710 : {
8711 1751640 : blockgroups[nblockgroups - 1].ntids++;
8712 : }
8713 :
8714 1829792 : if (promising)
8715 237048 : blockgroups[nblockgroups - 1].npromisingtids++;
8716 : }
8717 :
8718 : /*
8719 : * We're about ready to sort block groups to determine the optimal order
8720 : * for visiting heap blocks. But before we do, round the number of
8721 : * promising tuples for each block group up to the next power-of-two,
8722 : * unless it is very low (less than 4), in which case we round up to 4.
8723 : * npromisingtids is far too noisy to trust when choosing between a pair
8724 : * of block groups that both have very low values.
8725 : *
8726 : * This scheme divides heap blocks/block groups into buckets. Each bucket
8727 : * contains blocks that have _approximately_ the same number of promising
8728 : * TIDs as each other. The goal is to ignore relatively small differences
8729 : * in the total number of promising entries, so that the whole process can
8730 : * give a little weight to heapam factors (like heap block locality)
8731 : * instead. This isn't a trade-off, really -- we have nothing to lose. It
8732 : * would be foolish to interpret small differences in npromisingtids
8733 : * values as anything more than noise.
8734 : *
8735 : * We tiebreak on nhtids when sorting block group subsets that have the
8736 : * same npromisingtids, but this has the same issues as npromisingtids,
8737 : * and so nhtids is subject to the same power-of-two bucketing scheme. The
8738 : * only reason that we don't fix nhtids in the same way here too is that
8739 : * we'll need accurate nhtids values after the sort. We handle nhtids
8740 : * bucketization dynamically instead (in the sort comparator).
8741 : *
8742 : * See bottomup_nblocksfavorable() for a full explanation of when and how
8743 : * heap locality/favorable blocks can significantly influence when and how
8744 : * heap blocks are accessed.
8745 : */
8746 82042 : for (int b = 0; b < nblockgroups; b++)
8747 : {
8748 78152 : IndexDeleteCounts *group = blockgroups + b;
8749 :
8750 : /* Better off falling back on nhtids with low npromisingtids */
8751 78152 : if (group->npromisingtids <= 4)
8752 67172 : group->npromisingtids = 4;
8753 : else
8754 10980 : group->npromisingtids =
8755 10980 : pg_nextpower2_32((uint32) group->npromisingtids);
8756 : }
8757 :
8758 : /* Sort groups and rearrange caller's deltids array */
8759 3890 : qsort(blockgroups, nblockgroups, sizeof(IndexDeleteCounts),
8760 : bottomup_sort_and_shrink_cmp);
8761 3890 : reordereddeltids = palloc(delstate->ndeltids * sizeof(TM_IndexDelete));
8762 :
8763 3890 : nblockgroups = Min(BOTTOMUP_MAX_NBLOCKS, nblockgroups);
8764 : /* Determine number of favorable blocks at the start of final deltids */
8765 3890 : nblocksfavorable = bottomup_nblocksfavorable(blockgroups, nblockgroups,
8766 : delstate->deltids);
8767 :
8768 26012 : for (int b = 0; b < nblockgroups; b++)
8769 : {
8770 22122 : IndexDeleteCounts *group = blockgroups + b;
8771 22122 : TM_IndexDelete *firstdtid = delstate->deltids + group->ifirsttid;
8772 :
8773 22122 : memcpy(reordereddeltids + ncopied, firstdtid,
8774 22122 : sizeof(TM_IndexDelete) * group->ntids);
8775 22122 : ncopied += group->ntids;
8776 : }
8777 :
8778 : /* Copy final grouped and sorted TIDs back into start of caller's array */
8779 3890 : memcpy(delstate->deltids, reordereddeltids,
8780 : sizeof(TM_IndexDelete) * ncopied);
8781 3890 : delstate->ndeltids = ncopied;
8782 :
8783 3890 : pfree(reordereddeltids);
8784 3890 : pfree(blockgroups);
8785 :
8786 3890 : return nblocksfavorable;
8787 : }
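/*
 * A self-contained sketch of the power-of-two bucketing applied to
 * npromisingtids above (pg_nextpower2_32 from port/pg_bitutils.h does this
 * with bit tricks; the loop below only spells out the rounding rule).
 * Inputs of 0..4 all collapse into the lowest bucket (4), while e.g. 5 and 6
 * both become 8, 9 becomes 16, and 17 becomes 32, so small differences in
 * promising-TID counts cannot dominate the block group sort.
 */
static uint32
npromising_bucket_example(uint32 npromisingtids)
{
	uint32		bucket = 4;

	if (npromisingtids <= 4)
		return 4;				/* too noisy to distinguish; lowest bucket */
	while (bucket < npromisingtids)
		bucket *= 2;			/* next power of two >= npromisingtids */
	return bucket;
}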
8788 :
8789 : /*
8790 : * Perform XLogInsert for a heap-visible operation. 'block' is the block
8791 : * being marked all-visible, and vm_buffer is the buffer containing the
8792 : * corresponding visibility map block. Both should have already been modified
8793 : * and dirtied.
8794 : *
8795 : * snapshotConflictHorizon comes from the largest xmin on the page being
8796 : * marked all-visible. REDO routine uses it to generate recovery conflicts.
8797 : *
8798 : * If checksums or wal_log_hints are enabled, we may also generate a full-page
8799 : * image of heap_buffer. Otherwise, we optimize away the FPI (by specifying
8800 : * REGBUF_NO_IMAGE for the heap buffer), in which case the caller should *not*
8801 : * update the heap page's LSN.
8802 : */
8803 : XLogRecPtr
8804 93838 : log_heap_visible(Relation rel, Buffer heap_buffer, Buffer vm_buffer,
8805 : TransactionId snapshotConflictHorizon, uint8 vmflags)
8806 : {
8807 : xl_heap_visible xlrec;
8808 : XLogRecPtr recptr;
8809 : uint8 flags;
8810 :
8811 : Assert(BufferIsValid(heap_buffer));
8812 : Assert(BufferIsValid(vm_buffer));
8813 :
8814 93838 : xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
8815 93838 : xlrec.flags = vmflags;
8816 93838 : if (RelationIsAccessibleInLogicalDecoding(rel))
8817 254 : xlrec.flags |= VISIBILITYMAP_XLOG_CATALOG_REL;
8818 93838 : XLogBeginInsert();
8819 93838 : XLogRegisterData(&xlrec, SizeOfHeapVisible);
8820 :
8821 93838 : XLogRegisterBuffer(0, vm_buffer, 0);
8822 :
8823 93838 : flags = REGBUF_STANDARD;
8824 93838 : if (!XLogHintBitIsNeeded())
8825 6790 : flags |= REGBUF_NO_IMAGE;
8826 93838 : XLogRegisterBuffer(1, heap_buffer, flags);
8827 :
8828 93838 : recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE);
8829 :
8830 93838 : return recptr;
8831 : }
8832 :
8833 : /*
8834 : * Perform XLogInsert for a heap-update operation. Caller must already
8835 : * have modified the buffer(s) and marked them dirty.
8836 : */
8837 : static XLogRecPtr
8838 586614 : log_heap_update(Relation reln, Buffer oldbuf,
8839 : Buffer newbuf, HeapTuple oldtup, HeapTuple newtup,
8840 : HeapTuple old_key_tuple,
8841 : bool all_visible_cleared, bool new_all_visible_cleared)
8842 : {
8843 : xl_heap_update xlrec;
8844 : xl_heap_header xlhdr;
8845 : xl_heap_header xlhdr_idx;
8846 : uint8 info;
8847 : uint16 prefix_suffix[2];
8848 586614 : uint16 prefixlen = 0,
8849 586614 : suffixlen = 0;
8850 : XLogRecPtr recptr;
8851 586614 : Page page = BufferGetPage(newbuf);
8852 586614 : bool need_tuple_data = RelationIsLogicallyLogged(reln);
8853 : bool init;
8854 : int bufflags;
8855 :
8856 : /* Caller should not call me on a non-WAL-logged relation */
8857 : Assert(RelationNeedsWAL(reln));
8858 :
8859 586614 : XLogBeginInsert();
8860 :
8861 586614 : if (HeapTupleIsHeapOnly(newtup))
8862 284406 : info = XLOG_HEAP_HOT_UPDATE;
8863 : else
8864 302208 : info = XLOG_HEAP_UPDATE;
8865 :
8866 : /*
8867 : * If the old and new tuple are on the same page, we only need to log the
8868 : * parts of the new tuple that were changed. That saves on the amount of
8869 : * WAL we need to write. Currently, we just count any unchanged bytes in
8870 : * the beginning and end of the tuple. That's quick to check, and
8871 : * perfectly covers the common case that only one field is updated.
8872 : *
8873 : * We could do this even if the old and new tuple are on different pages,
8874 : * but only if we don't make a full-page image of the old page, which is
8875 : * difficult to know in advance. Also, if the old tuple is corrupt for
8876 : * some reason, it would allow the corruption to propagate to the new page,
8877 : * so it seems best to avoid doing so. Under the general assumption that most
8878 : * updates tend to create the new tuple version on the same page, there
8879 : * isn't much to be gained by doing this across pages anyway.
8880 : *
8881 : * Skip this if we're taking a full-page image of the new page, as we
8882 : * don't include the new tuple in the WAL record in that case. Also
8883 : * disable if wal_level='logical', as logical decoding needs to be able to
8884 : * read the whole new tuple from the WAL record alone.
8885 : */
8886 586614 : if (oldbuf == newbuf && !need_tuple_data &&
8887 284206 : !XLogCheckBufferNeedsBackup(newbuf))
8888 : {
8889 282890 : char *oldp = (char *) oldtup->t_data + oldtup->t_data->t_hoff;
8890 282890 : char *newp = (char *) newtup->t_data + newtup->t_data->t_hoff;
8891 282890 : int oldlen = oldtup->t_len - oldtup->t_data->t_hoff;
8892 282890 : int newlen = newtup->t_len - newtup->t_data->t_hoff;
8893 :
8894 : /* Check for common prefix between old and new tuple */
8895 23533886 : for (prefixlen = 0; prefixlen < Min(oldlen, newlen); prefixlen++)
8896 : {
8897 23482538 : if (newp[prefixlen] != oldp[prefixlen])
8898 231542 : break;
8899 : }
8900 :
8901 : /*
8902 : * Storing the length of the prefix takes 2 bytes, so we need to save
8903 : * at least 3 bytes or there's no point.
8904 : */
8905 282890 : if (prefixlen < 3)
8906 44116 : prefixlen = 0;
8907 :
8908 : /* Same for suffix */
8909 9066908 : for (suffixlen = 0; suffixlen < Min(oldlen, newlen) - prefixlen; suffixlen++)
8910 : {
8911 9015058 : if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
8912 231040 : break;
8913 : }
8914 282890 : if (suffixlen < 3)
8915 70576 : suffixlen = 0;
8916 : }
8917 :
8918 : /* Prepare main WAL data chain */
8919 586614 : xlrec.flags = 0;
8920 586614 : if (all_visible_cleared)
8921 3064 : xlrec.flags |= XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED;
8922 586614 : if (new_all_visible_cleared)
8923 1780 : xlrec.flags |= XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED;
8924 586614 : if (prefixlen > 0)
8925 238774 : xlrec.flags |= XLH_UPDATE_PREFIX_FROM_OLD;
8926 586614 : if (suffixlen > 0)
8927 212314 : xlrec.flags |= XLH_UPDATE_SUFFIX_FROM_OLD;
8928 586614 : if (need_tuple_data)
8929 : {
8930 94048 : xlrec.flags |= XLH_UPDATE_CONTAINS_NEW_TUPLE;
8931 94048 : if (old_key_tuple)
8932 : {
8933 292 : if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
8934 130 : xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_TUPLE;
8935 : else
8936 162 : xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_KEY;
8937 : }
8938 : }
8939 :
8940 : /* If the new tuple is the first and only tuple on the page... */
8941 593618 : if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
8942 7004 : PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
8943 : {
8944 6568 : info |= XLOG_HEAP_INIT_PAGE;
8945 6568 : init = true;
8946 : }
8947 : else
8948 580046 : init = false;
8949 :
8950 : /* Prepare WAL data for the old page */
8951 586614 : xlrec.old_offnum = ItemPointerGetOffsetNumber(&oldtup->t_self);
8952 586614 : xlrec.old_xmax = HeapTupleHeaderGetRawXmax(oldtup->t_data);
8953 1173228 : xlrec.old_infobits_set = compute_infobits(oldtup->t_data->t_infomask,
8954 586614 : oldtup->t_data->t_infomask2);
8955 :
8956 : /* Prepare WAL data for the new page */
8957 586614 : xlrec.new_offnum = ItemPointerGetOffsetNumber(&newtup->t_self);
8958 586614 : xlrec.new_xmax = HeapTupleHeaderGetRawXmax(newtup->t_data);
8959 :
8960 586614 : bufflags = REGBUF_STANDARD;
8961 586614 : if (init)
8962 6568 : bufflags |= REGBUF_WILL_INIT;
8963 586614 : if (need_tuple_data)
8964 94048 : bufflags |= REGBUF_KEEP_DATA;
8965 :
8966 586614 : XLogRegisterBuffer(0, newbuf, bufflags);
8967 586614 : if (oldbuf != newbuf)
8968 278520 : XLogRegisterBuffer(1, oldbuf, REGBUF_STANDARD);
8969 :
8970 586614 : XLogRegisterData(&xlrec, SizeOfHeapUpdate);
8971 :
8972 : /*
8973 : * Prepare WAL data for the new tuple.
8974 : */
8975 586614 : if (prefixlen > 0 || suffixlen > 0)
8976 : {
8977 281968 : if (prefixlen > 0 && suffixlen > 0)
8978 : {
8979 169120 : prefix_suffix[0] = prefixlen;
8980 169120 : prefix_suffix[1] = suffixlen;
8981 169120 : XLogRegisterBufData(0, &prefix_suffix, sizeof(uint16) * 2);
8982 : }
8983 112848 : else if (prefixlen > 0)
8984 : {
8985 69654 : XLogRegisterBufData(0, &prefixlen, sizeof(uint16));
8986 : }
8987 : else
8988 : {
8989 43194 : XLogRegisterBufData(0, &suffixlen, sizeof(uint16));
8990 : }
8991 : }
8992 :
8993 586614 : xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
8994 586614 : xlhdr.t_infomask = newtup->t_data->t_infomask;
8995 586614 : xlhdr.t_hoff = newtup->t_data->t_hoff;
8996 : Assert(SizeofHeapTupleHeader + prefixlen + suffixlen <= newtup->t_len);
8997 :
8998 : /*
8999 : * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
9000 : *
9001 : * The 'data' doesn't include the common prefix or suffix.
9002 : */
9003 586614 : XLogRegisterBufData(0, &xlhdr, SizeOfHeapHeader);
9004 586614 : if (prefixlen == 0)
9005 : {
9006 347840 : XLogRegisterBufData(0,
9007 347840 : (char *) newtup->t_data + SizeofHeapTupleHeader,
9008 347840 : newtup->t_len - SizeofHeapTupleHeader - suffixlen);
9009 : }
9010 : else
9011 : {
9012 : /*
9013 : * Have to write the null bitmap and data after the common prefix as
9014 : * two separate rdata entries.
9015 : */
9016 : /* bitmap [+ padding] [+ oid] */
9017 238774 : if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
9018 : {
9019 238774 : XLogRegisterBufData(0,
9020 238774 : (char *) newtup->t_data + SizeofHeapTupleHeader,
9021 238774 : newtup->t_data->t_hoff - SizeofHeapTupleHeader);
9022 : }
9023 :
9024 : /* data after common prefix */
9025 238774 : XLogRegisterBufData(0,
9026 238774 : (char *) newtup->t_data + newtup->t_data->t_hoff + prefixlen,
9027 238774 : newtup->t_len - newtup->t_data->t_hoff - prefixlen - suffixlen);
9028 : }
9029 :
9030 : /* We need to log a tuple identity */
9031 586614 : if (need_tuple_data && old_key_tuple)
9032 : {
9033 : /* don't really need this, but it's more convenient to decode */
9034 292 : xlhdr_idx.t_infomask2 = old_key_tuple->t_data->t_infomask2;
9035 292 : xlhdr_idx.t_infomask = old_key_tuple->t_data->t_infomask;
9036 292 : xlhdr_idx.t_hoff = old_key_tuple->t_data->t_hoff;
9037 :
9038 292 : XLogRegisterData(&xlhdr_idx, SizeOfHeapHeader);
9039 :
9040 : /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
9041 292 : XLogRegisterData((char *) old_key_tuple->t_data + SizeofHeapTupleHeader,
9042 292 : old_key_tuple->t_len - SizeofHeapTupleHeader);
9043 : }
9044 :
9045 : /* filtering by origin on a row level is much more efficient */
9046 586614 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
9047 :
9048 586614 : recptr = XLogInsert(RM_HEAP_ID, info);
9049 :
9050 586614 : return recptr;
9051 : }
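
/*
 * Illustrative sketch, not part of heapam.c: the prefix/suffix delta
 * encoding used by log_heap_update(), applied to two standalone byte
 * buffers.  For old data "AAAABBBBCCCC" and new data "AAAAXXXXCCCC" this
 * yields prefixlen = 4 and suffixlen = 4, so only the four changed middle
 * bytes of the new tuple (plus the uint16 length(s) signalled by
 * XLH_UPDATE_PREFIX_FROM_OLD / XLH_UPDATE_SUFFIX_FROM_OLD) go into the WAL
 * record.
 */
#ifdef NOT_USED
static void
example_prefix_suffix(const char *oldp, int oldlen,
					  const char *newp, int newlen,
					  uint16 *prefixlen, uint16 *suffixlen)
{
	uint16		plen;
	uint16		slen;

	/* Count matching leading bytes. */
	for (plen = 0; plen < Min(oldlen, newlen); plen++)
	{
		if (newp[plen] != oldp[plen])
			break;
	}

	/* Storing the prefix length costs 2 bytes, so require at least 3. */
	if (plen < 3)
		plen = 0;

	/* Count matching trailing bytes beyond the prefix; same 3-byte rule. */
	for (slen = 0; slen < Min(oldlen, newlen) - plen; slen++)
	{
		if (newp[newlen - slen - 1] != oldp[oldlen - slen - 1])
			break;
	}
	if (slen < 3)
		slen = 0;

	*prefixlen = plen;
	*suffixlen = slen;
}
#endif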
9052 :
9053 : /*
9054 : * Perform XLogInsert of an XLOG_HEAP2_NEW_CID record
9055 : *
9056 : * This is only used in wal_level >= WAL_LEVEL_LOGICAL, and only for catalog
9057 : * tuples.
9058 : */
9059 : static XLogRecPtr
9060 48880 : log_heap_new_cid(Relation relation, HeapTuple tup)
9061 : {
9062 : xl_heap_new_cid xlrec;
9063 :
9064 : XLogRecPtr recptr;
9065 48880 : HeapTupleHeader hdr = tup->t_data;
9066 :
9067 : Assert(ItemPointerIsValid(&tup->t_self));
9068 : Assert(tup->t_tableOid != InvalidOid);
9069 :
9070 48880 : xlrec.top_xid = GetTopTransactionId();
9071 48880 : xlrec.target_locator = relation->rd_locator;
9072 48880 : xlrec.target_tid = tup->t_self;
9073 :
9074 : /*
9075 : * If the tuple got inserted & deleted in the same TX, we definitely have a
9076 : * combo CID, so set both cmin and cmax.
9077 : */
9078 48880 : if (hdr->t_infomask & HEAP_COMBOCID)
9079 : {
9080 : Assert(!(hdr->t_infomask & HEAP_XMAX_INVALID));
9081 : Assert(!HeapTupleHeaderXminInvalid(hdr));
9082 4048 : xlrec.cmin = HeapTupleHeaderGetCmin(hdr);
9083 4048 : xlrec.cmax = HeapTupleHeaderGetCmax(hdr);
9084 4048 : xlrec.combocid = HeapTupleHeaderGetRawCommandId(hdr);
9085 : }
9086 : /* No combo CID, so only cmin or cmax can be set by this TX */
9087 : else
9088 : {
9089 : /*
9090 : * Tuple inserted.
9091 : *
9092 : * We need to check for LOCK ONLY because multixacts might be
9093 : * transferred to the new tuple in case of FOR KEY SHARE updates,
9094 : * in which case there will be an xmax even though the tuple was
9095 : * just inserted.
9096 : */
9097 58424 : if (hdr->t_infomask & HEAP_XMAX_INVALID ||
9098 13592 : HEAP_XMAX_IS_LOCKED_ONLY(hdr->t_infomask))
9099 : {
9100 31242 : xlrec.cmin = HeapTupleHeaderGetRawCommandId(hdr);
9101 31242 : xlrec.cmax = InvalidCommandId;
9102 : }
9103 : /* A tuple from a different tx is being updated or deleted. */
9104 : else
9105 : {
9106 13590 : xlrec.cmin = InvalidCommandId;
9107 13590 : xlrec.cmax = HeapTupleHeaderGetRawCommandId(hdr);
9108 : }
9109 44832 : xlrec.combocid = InvalidCommandId;
9110 : }
9111 :
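	/*
	 * Summary of the choices above (a restatement for reference, not
	 * additional logic):
	 *
	 *   tuple state within this TX           cmin       cmax       combocid
	 *   ----------------------------------   --------   --------   --------
	 *   inserted and deleted (combo CID)     real       real       raw cid
	 *   only inserted (or just locked)       raw cid    invalid    invalid
	 *   updated/deleted, inserted elsewhere  invalid    raw cid    invalid
	 */
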
9112 : /*
9113 : * Note that we don't need to register the buffer here, because this
9114 : * operation does not modify the page. The insert/update/delete that
9115 : * called us certainly did, but that's WAL-logged separately.
9116 : */
9117 48880 : XLogBeginInsert();
9118 48880 : XLogRegisterData(&xlrec, SizeOfHeapNewCid);
9119 :
9120 : /* will be looked at irrespective of origin */
9121 :
9122 48880 : recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_NEW_CID);
9123 :
9124 48880 : return recptr;
9125 : }
9126 :
9127 : /*
9128 : * Build a heap tuple representing the configured REPLICA IDENTITY to represent
9129 : * the old tuple in an UPDATE or DELETE.
9130 : *
9131 : * Returns NULL if there's no need to log an identity or if there's no suitable
9132 : * key defined.
9133 : *
9134 : * Pass key_required as true if any replica identity columns changed value, or
9135 : * if any of them contain external data. Deletes must always pass true.
9136 : *
9137 : * *copy is set to true if the returned tuple is a modified copy rather than
9138 : * the same tuple that was passed in.
9139 : */
9140 : static HeapTuple
9141 3595028 : ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
9142 : bool *copy)
9143 : {
9144 3595028 : TupleDesc desc = RelationGetDescr(relation);
9145 3595028 : char replident = relation->rd_rel->relreplident;
9146 : Bitmapset *idattrs;
9147 : HeapTuple key_tuple;
9148 : bool nulls[MaxHeapAttributeNumber];
9149 : Datum values[MaxHeapAttributeNumber];
9150 :
9151 3595028 : *copy = false;
9152 :
9153 3595028 : if (!RelationIsLogicallyLogged(relation))
9154 3394438 : return NULL;
9155 :
9156 200590 : if (replident == REPLICA_IDENTITY_NOTHING)
9157 462 : return NULL;
9158 :
9159 200128 : if (replident == REPLICA_IDENTITY_FULL)
9160 : {
9161 : /*
9162 : * When logging the entire old tuple, it very well could contain
9163 : * toasted columns. If so, force them to be inlined.
9164 : */
9165 394 : if (HeapTupleHasExternal(tp))
9166 : {
9167 8 : *copy = true;
9168 8 : tp = toast_flatten_tuple(tp, desc);
9169 : }
9170 394 : return tp;
9171 : }
9172 :
9173 : /* if the key isn't required and we're only logging the key, we're done */
9174 199734 : if (!key_required)
9175 93756 : return NULL;
9176 :
9177 : /* find out the replica identity columns */
9178 105978 : idattrs = RelationGetIndexAttrBitmap(relation,
9179 : INDEX_ATTR_BITMAP_IDENTITY_KEY);
9180 :
9181 : /*
9182 : * If there are no defined replica identity columns, treat this as !key_required.
9183 : * (This case should not be reachable from heap_update, since that should
9184 : * calculate key_required accurately. But heap_delete just passes
9185 : * constant true for key_required, so we can hit this case in deletes.)
9186 : */
9187 105978 : if (bms_is_empty(idattrs))
9188 12042 : return NULL;
9189 :
9190 : /*
9191 : * Construct a new tuple containing only the replica identity columns,
9192 : * with nulls elsewhere. While we're at it, assert that the replica
9193 : * identity columns aren't null.
9194 : */
9195 93936 : heap_deform_tuple(tp, desc, values, nulls);
9196 :
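	/*
	 * Note on the bitmapset lookup below: attribute numbers are 1-based
	 * (hence "i + 1"), and idattrs stores them offset by
	 * FirstLowInvalidHeapAttributeNumber so that system attributes, which
	 * have negative attribute numbers, can also be represented as
	 * non-negative bitmapset members.
	 */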
9197 301796 : for (int i = 0; i < desc->natts; i++)
9198 : {
9199 207860 : if (bms_is_member(i + 1 - FirstLowInvalidHeapAttributeNumber,
9200 : idattrs))
9201 : Assert(!nulls[i]);
9202 : else
9203 113900 : nulls[i] = true;
9204 : }
9205 :
9206 93936 : key_tuple = heap_form_tuple(desc, values, nulls);
9207 93936 : *copy = true;
9208 :
9209 93936 : bms_free(idattrs);
9210 :
9211 : /*
9212 : * If the tuple, which by now contains only indexed columns, still has
9213 : * toasted columns, force them to be inlined. This is somewhat unlikely
9214 : * since there are limits on the size of indexed columns, so we don't
9215 : * duplicate toast_flatten_tuple()'s functionality in the above loop over
9216 : * the indexed columns, even if that would be more efficient.
9217 : */
9218 93936 : if (HeapTupleHasExternal(key_tuple))
9219 : {
9220 8 : HeapTuple oldtup = key_tuple;
9221 :
9222 8 : key_tuple = toast_flatten_tuple(oldtup, desc);
9223 8 : heap_freetuple(oldtup);
9224 : }
9225 :
9226 93936 : return key_tuple;
9227 : }
9228 :
9229 : /*
9230 : * HeapCheckForSerializableConflictOut
9231 : * We are reading a tuple. If it's not visible, there may be a
9232 : * rw-conflict out with the inserter. Otherwise, if it is visible to us
9233 : * but has been deleted, there may be a rw-conflict out with the deleter.
9234 : *
9235 : * We will determine the top level xid of the writing transaction with which
9236 : * we may be in conflict, and ask CheckForSerializableConflictOut() to check
9237 : * for overlap with our own transaction.
9238 : *
9239 : * This function should be called just about anywhere in heapam.c where a
9240 : * tuple has been read. The caller must hold at least a shared lock on the
9241 : * buffer, because this function might set hint bits on the tuple. There is
9242 : * currently no known reason to call this function from an index AM.
9243 : */
9244 : void
9245 63315812 : HeapCheckForSerializableConflictOut(bool visible, Relation relation,
9246 : HeapTuple tuple, Buffer buffer,
9247 : Snapshot snapshot)
9248 : {
9249 : TransactionId xid;
9250 : HTSV_Result htsvResult;
9251 :
9252 63315812 : if (!CheckForSerializableConflictOutNeeded(relation, snapshot))
9253 63265088 : return;
9254 :
9255 : /*
9256 : * Check to see whether the tuple has been written to by a concurrent
9257 : * transaction, either to create it not visible to us, or to delete it
9258 : * while it is visible to us. The "visible" bool indicates whether the
9259 : * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
9260 : * is going on with it.
9261 : *
9262 : * In the event of a concurrently inserted tuple that also happens to have
9263 : * been concurrently updated (by a separate transaction), the xmin of the
9264 : * tuple will be used -- not the updater's xid.
9265 : */
9266 50724 : htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
9267 50724 : switch (htsvResult)
9268 : {
9269 49098 : case HEAPTUPLE_LIVE:
9270 49098 : if (visible)
9271 49072 : return;
9272 26 : xid = HeapTupleHeaderGetXmin(tuple->t_data);
9273 26 : break;
9274 722 : case HEAPTUPLE_RECENTLY_DEAD:
9275 : case HEAPTUPLE_DELETE_IN_PROGRESS:
9276 722 : if (visible)
9277 570 : xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
9278 : else
9279 152 : xid = HeapTupleHeaderGetXmin(tuple->t_data);
9280 :
9281 722 : if (TransactionIdPrecedes(xid, TransactionXmin))
9282 : {
9283 : /* This is like the HEAPTUPLE_DEAD case */
9284 : Assert(!visible);
9285 134 : return;
9286 : }
9287 588 : break;
9288 656 : case HEAPTUPLE_INSERT_IN_PROGRESS:
9289 656 : xid = HeapTupleHeaderGetXmin(tuple->t_data);
9290 656 : break;
9291 248 : case HEAPTUPLE_DEAD:
9292 : Assert(!visible);
9293 248 : return;
9294 0 : default:
9295 :
9296 : /*
9297 : * The only way to get to this default clause is if a new value is
9298 : * added to the enum type without adding it to this switch
9299 : * statement. That's a bug, so elog.
9300 : */
9301 0 : elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
9302 :
9303 : /*
9304 : * In spite of having all enum values covered and calling elog on
9305 : * this default, some compilers think this is a code path which
9306 : * allows xid to be used below without initialization. Silence
9307 : * that warning.
9308 : */
9309 : xid = InvalidTransactionId;
9310 : }
9311 :
9312 : Assert(TransactionIdIsValid(xid));
9313 : Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
9314 :
9315 : /*
9316 : * Find top level xid. Bail out if xid is too early to be a conflict, or
9317 : * if it's our own xid.
9318 : */
9319 1270 : if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
9320 128 : return;
9321 1142 : xid = SubTransGetTopmostTransaction(xid);
9322 1142 : if (TransactionIdPrecedes(xid, TransactionXmin))
9323 0 : return;
9324 :
9325 1142 : CheckForSerializableConflictOut(relation, xid, snapshot);
9326 : }
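
/*
 * Illustrative sketch, not part of heapam.c: a typical call pattern for
 * HeapCheckForSerializableConflictOut() when reading a tuple under a shared
 * buffer lock, as described in the function's header comment.  The helper
 * name and its arguments are assumptions for illustration only.
 */
#ifdef NOT_USED
static bool
example_read_tuple_with_conflict_check(Relation relation, HeapTuple tuple,
									   Buffer buffer, Snapshot snapshot)
{
	bool		valid;

	/* Visibility check first; it may set hint bits on the tuple. */
	valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);

	/* Then report a possible rw-conflict out, whether visible or not. */
	HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer,
										snapshot);

	return valid;
}
#endif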
|