Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * heapam.c
4 : * heap access method code
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/access/heap/heapam.c
12 : *
13 : *
14 : * INTERFACE ROUTINES
15 : * heap_beginscan - begin relation scan
16 : * heap_rescan - restart a relation scan
17 : * heap_endscan - end relation scan
18 : * heap_getnext - retrieve next tuple in scan
19 : * heap_fetch - retrieve tuple with given tid
20 : * heap_insert - insert tuple into a relation
21 : * heap_multi_insert - insert multiple tuples into a relation
22 : * heap_delete - delete a tuple from a relation
23 : * heap_update - replace a tuple in a relation with another tuple
24 : *
25 : * NOTES
26 : * This file contains the heap_ routines which implement
27 : * the POSTGRES heap access method used for all POSTGRES
28 : * relations.
29 : *
30 : *-------------------------------------------------------------------------
31 : */
32 : #include "postgres.h"
33 :
34 : #include "access/heapam.h"
35 : #include "access/heaptoast.h"
36 : #include "access/hio.h"
37 : #include "access/multixact.h"
38 : #include "access/subtrans.h"
39 : #include "access/syncscan.h"
40 : #include "access/valid.h"
41 : #include "access/visibilitymap.h"
42 : #include "access/xloginsert.h"
43 : #include "catalog/pg_database.h"
44 : #include "catalog/pg_database_d.h"
45 : #include "commands/vacuum.h"
46 : #include "pgstat.h"
47 : #include "port/pg_bitutils.h"
48 : #include "storage/lmgr.h"
49 : #include "storage/predicate.h"
50 : #include "storage/procarray.h"
51 : #include "utils/datum.h"
52 : #include "utils/injection_point.h"
53 : #include "utils/inval.h"
54 : #include "utils/spccache.h"
55 : #include "utils/syscache.h"
56 :
57 :
58 : static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
59 : TransactionId xid, CommandId cid, int options);
60 : static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
61 : Buffer newbuf, HeapTuple oldtup,
62 : HeapTuple newtup, HeapTuple old_key_tuple,
63 : bool all_visible_cleared, bool new_all_visible_cleared);
64 : #ifdef USE_ASSERT_CHECKING
65 : static void check_lock_if_inplace_updateable_rel(Relation relation,
66 : ItemPointer otid,
67 : HeapTuple newtup);
68 : static void check_inplace_rel_lock(HeapTuple oldtup);
69 : #endif
70 : static Bitmapset *HeapDetermineColumnsInfo(Relation relation,
71 : Bitmapset *interesting_cols,
72 : Bitmapset *external_cols,
73 : HeapTuple oldtup, HeapTuple newtup,
74 : bool *has_external);
75 : static bool heap_acquire_tuplock(Relation relation, ItemPointer tid,
76 : LockTupleMode mode, LockWaitPolicy wait_policy,
77 : bool *have_tuple_lock);
78 : static inline BlockNumber heapgettup_advance_block(HeapScanDesc scan,
79 : BlockNumber block,
80 : ScanDirection dir);
81 : static pg_noinline BlockNumber heapgettup_initial_block(HeapScanDesc scan,
82 : ScanDirection dir);
83 : static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
84 : uint16 old_infomask2, TransactionId add_to_xmax,
85 : LockTupleMode mode, bool is_update,
86 : TransactionId *result_xmax, uint16 *result_infomask,
87 : uint16 *result_infomask2);
88 : static TM_Result heap_lock_updated_tuple(Relation rel, HeapTuple tuple,
89 : ItemPointer ctid, TransactionId xid,
90 : LockTupleMode mode);
91 : static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
92 : uint16 *new_infomask2);
93 : static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
94 : uint16 t_infomask);
95 : static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
96 : LockTupleMode lockmode, bool *current_is_member);
97 : static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
98 : Relation rel, ItemPointer ctid, XLTW_Oper oper,
99 : int *remaining);
100 : static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
101 : uint16 infomask, Relation rel, int *remaining,
102 : bool logLockFailure);
103 : static void index_delete_sort(TM_IndexDeleteOp *delstate);
104 : static int bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate);
105 : static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
106 : static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
107 : bool *copy);
108 :
109 :
110 : /*
111 : * Each tuple lock mode has a corresponding heavyweight lock, and one or two
112 : * corresponding MultiXactStatuses (one to merely lock tuples, another one to
113 : * update them). This table (and the macros below) helps us determine the
114 : * heavyweight lock mode and MultiXactStatus values to use for any particular
115 : * tuple lock strength.
116 : *
117 : * These interact with InplaceUpdateTupleLock, an alias for ExclusiveLock.
118 : *
119 : * Don't look at lockstatus/updstatus directly! Use get_mxact_status_for_lock
120 : * instead.
121 : */
122 : static const struct
123 : {
124 : LOCKMODE hwlock;
125 : int lockstatus;
126 : int updstatus;
127 : }
128 :
129 : tupleLockExtraInfo[MaxLockTupleMode + 1] =
130 : {
131 : { /* LockTupleKeyShare */
132 : AccessShareLock,
133 : MultiXactStatusForKeyShare,
134 : -1 /* KeyShare does not allow updating tuples */
135 : },
136 : { /* LockTupleShare */
137 : RowShareLock,
138 : MultiXactStatusForShare,
139 : -1 /* Share does not allow updating tuples */
140 : },
141 : { /* LockTupleNoKeyExclusive */
142 : ExclusiveLock,
143 : MultiXactStatusForNoKeyUpdate,
144 : MultiXactStatusNoKeyUpdate
145 : },
146 : { /* LockTupleExclusive */
147 : AccessExclusiveLock,
148 : MultiXactStatusForUpdate,
149 : MultiXactStatusUpdate
150 : }
151 : };
152 :
153 : /* Get the LOCKMODE for a given MultiXactStatus */
154 : #define LOCKMODE_from_mxstatus(status) \
155 : (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
156 :
157 : /*
158 : * Acquire heavyweight locks on tuples, using a LockTupleMode strength value.
159 : * This is more readable than having every caller translate it to lock.h's
160 : * LOCKMODE.
161 : */
162 : #define LockTupleTuplock(rel, tup, mode) \
163 : LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
164 : #define UnlockTupleTuplock(rel, tup, mode) \
165 : UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
166 : #define ConditionalLockTupleTuplock(rel, tup, mode, log) \
167 : ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock, (log))
168 :
169 : #ifdef USE_PREFETCH
170 : /*
171 : * heap_index_delete_tuples and index_delete_prefetch_buffer use this
172 : * structure to coordinate prefetching activity
173 : */
174 : typedef struct
175 : {
176 : BlockNumber cur_hblkno;
177 : int next_item;
178 : int ndeltids;
179 : TM_IndexDelete *deltids;
180 : } IndexDeletePrefetchState;
181 : #endif
182 :
183 : /* heap_index_delete_tuples bottom-up index deletion costing constants */
184 : #define BOTTOMUP_MAX_NBLOCKS 6
185 : #define BOTTOMUP_TOLERANCE_NBLOCKS 3
186 :
187 : /*
188 : * heap_index_delete_tuples uses this when determining which heap blocks it
189 : * must visit to help its bottom-up index deletion caller
190 : */
191 : typedef struct IndexDeleteCounts
192 : {
193 : int16 npromisingtids; /* Number of "promising" TIDs in group */
194 : int16 ntids; /* Number of TIDs in group */
195 : int16 ifirsttid; /* Offset to group's first deltid */
196 : } IndexDeleteCounts;
197 :
198 : /*
199 : * This table maps tuple lock strength values for each particular
200 : * MultiXactStatus value.
201 : */
202 : static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
203 : {
204 : LockTupleKeyShare, /* ForKeyShare */
205 : LockTupleShare, /* ForShare */
206 : LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
207 : LockTupleExclusive, /* ForUpdate */
208 : LockTupleNoKeyExclusive, /* NoKeyUpdate */
209 : LockTupleExclusive /* Update */
210 : };
211 :
212 : /* Get the LockTupleMode for a given MultiXactStatus */
213 : #define TUPLOCK_from_mxstatus(status) \
214 : (MultiXactStatusLock[(status)])
215 :
216 : /*
217 : * Check that we have a valid snapshot if we might need TOAST access.
218 : */
219 : static inline void
220 21000370 : AssertHasSnapshotForToast(Relation rel)
221 : {
222 : #ifdef USE_ASSERT_CHECKING
223 :
224 : /* bootstrap mode in particular breaks this rule */
225 : if (!IsNormalProcessingMode())
226 : return;
227 :
228 : /* if the relation doesn't have a TOAST table, we are good */
229 : if (!OidIsValid(rel->rd_rel->reltoastrelid))
230 : return;
231 :
232 : Assert(HaveRegisteredOrActiveSnapshot());
233 :
234 : #endif /* USE_ASSERT_CHECKING */
235 21000370 : }
236 :
237 : /* ----------------------------------------------------------------
238 : * heap support routines
239 : * ----------------------------------------------------------------
240 : */
241 :
242 : /*
243 : * Streaming read API callback for parallel sequential scans. Returns the next
244 : * block the caller wants from the read stream or InvalidBlockNumber when done.
245 : */
246 : static BlockNumber
247 201642 : heap_scan_stream_read_next_parallel(ReadStream *stream,
248 : void *callback_private_data,
249 : void *per_buffer_data)
250 : {
251 201642 : HeapScanDesc scan = (HeapScanDesc) callback_private_data;
252 :
253 : Assert(ScanDirectionIsForward(scan->rs_dir));
254 : Assert(scan->rs_base.rs_parallel);
255 :
256 201642 : if (unlikely(!scan->rs_inited))
257 : {
258 : /* parallel scan */
259 2986 : table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
260 2986 : scan->rs_parallelworkerdata,
261 2986 : (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel);
262 :
263 : /* may return InvalidBlockNumber if there are no more blocks */
264 5972 : scan->rs_prefetch_block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
265 2986 : scan->rs_parallelworkerdata,
266 2986 : (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel);
267 2986 : scan->rs_inited = true;
268 : }
269 : else
270 : {
271 198656 : scan->rs_prefetch_block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
272 198656 : scan->rs_parallelworkerdata, (ParallelBlockTableScanDesc)
273 198656 : scan->rs_base.rs_parallel);
274 : }
275 :
276 201642 : return scan->rs_prefetch_block;
277 : }
278 :
279 : /*
280 : * Streaming read API callback for serial sequential and TID range scans.
281 : * Returns the next block the caller wants from the read stream or
282 : * InvalidBlockNumber when done.
283 : */
284 : static BlockNumber
285 7499738 : heap_scan_stream_read_next_serial(ReadStream *stream,
286 : void *callback_private_data,
287 : void *per_buffer_data)
288 : {
289 7499738 : HeapScanDesc scan = (HeapScanDesc) callback_private_data;
290 :
291 7499738 : if (unlikely(!scan->rs_inited))
292 : {
293 1943268 : scan->rs_prefetch_block = heapgettup_initial_block(scan, scan->rs_dir);
294 1943268 : scan->rs_inited = true;
295 : }
296 : else
297 5556470 : scan->rs_prefetch_block = heapgettup_advance_block(scan,
298 : scan->rs_prefetch_block,
299 : scan->rs_dir);
300 :
301 7499738 : return scan->rs_prefetch_block;
302 : }
303 :
304 : /*
305 : * Read stream API callback for bitmap heap scans.
306 : * Returns the next block the caller wants from the read stream or
307 : * InvalidBlockNumber when done.
308 : */
309 : static BlockNumber
310 421440 : bitmapheap_stream_read_next(ReadStream *pgsr, void *private_data,
311 : void *per_buffer_data)
312 : {
313 421440 : TBMIterateResult *tbmres = per_buffer_data;
314 421440 : BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) private_data;
315 421440 : HeapScanDesc hscan = (HeapScanDesc) bscan;
316 421440 : TableScanDesc sscan = &hscan->rs_base;
317 :
318 : for (;;)
319 : {
320 421440 : CHECK_FOR_INTERRUPTS();
321 :
322 : /* no more entries in the bitmap */
323 421440 : if (!tbm_iterate(&sscan->st.rs_tbmiterator, tbmres))
324 20370 : return InvalidBlockNumber;
325 :
326 : /*
327 : * Ignore any claimed entries past what we think is the end of the
328 : * relation. It may have been extended after the start of our scan (we
329 : * only hold an AccessShareLock, and it could be inserts from this
330 : * backend). We don't take this optimization in SERIALIZABLE
331 : * isolation though, as we need to examine all invisible tuples
332 : * reachable by the index.
333 : */
334 401070 : if (!IsolationIsSerializable() &&
335 400852 : tbmres->blockno >= hscan->rs_nblocks)
336 0 : continue;
337 :
338 401070 : return tbmres->blockno;
339 : }
340 :
341 : /* not reachable */
342 : Assert(false);
343 : }
344 :
345 : /* ----------------
346 : * initscan - scan code common to heap_beginscan and heap_rescan
347 : * ----------------
348 : */
349 : static void
350 1986336 : initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
351 : {
352 1986336 : ParallelBlockTableScanDesc bpscan = NULL;
353 : bool allow_strat;
354 : bool allow_sync;
355 :
356 : /*
357 : * Determine the number of blocks we have to scan.
358 : *
359 : * It is sufficient to do this once at scan start, since any tuples added
360 : * while the scan is in progress will be invisible to my snapshot anyway.
361 : * (That is not true when using a non-MVCC snapshot. However, we couldn't
362 : * guarantee to return tuples added after scan start anyway, since they
363 : * might go into pages we already scanned. To guarantee consistent
364 : * results for a non-MVCC snapshot, the caller must hold some higher-level
365 : * lock that ensures the interesting tuple(s) won't change.)
366 : */
367 1986336 : if (scan->rs_base.rs_parallel != NULL)
368 : {
369 4056 : bpscan = (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
370 4056 : scan->rs_nblocks = bpscan->phs_nblocks;
371 : }
372 : else
373 1982280 : scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_base.rs_rd);
374 :
375 : /*
376 : * If the table is large relative to NBuffers, use a bulk-read access
377 : * strategy and enable synchronized scanning (see syncscan.c). Although
378 : * the thresholds for these features could be different, we make them the
379 : * same so that there are only two behaviors to tune rather than four.
380 : * (However, some callers need to be able to disable one or both of these
381 : * behaviors, independently of the size of the table; also there is a GUC
382 : * variable that can disable synchronized scanning.)
383 : *
384 : * Note that table_block_parallelscan_initialize has a very similar test;
385 : * if you change this, consider changing that one, too.
386 : */
387 1986332 : if (!RelationUsesLocalBuffers(scan->rs_base.rs_rd) &&
388 1971768 : scan->rs_nblocks > NBuffers / 4)
389 : {
390 27070 : allow_strat = (scan->rs_base.rs_flags & SO_ALLOW_STRAT) != 0;
391 27070 : allow_sync = (scan->rs_base.rs_flags & SO_ALLOW_SYNC) != 0;
392 : }
393 : else
394 1959262 : allow_strat = allow_sync = false;
395 :
396 1986332 : if (allow_strat)
397 : {
398 : /* During a rescan, keep the previous strategy object. */
399 24484 : if (scan->rs_strategy == NULL)
400 24118 : scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
401 : }
402 : else
403 : {
404 1961848 : if (scan->rs_strategy != NULL)
405 0 : FreeAccessStrategy(scan->rs_strategy);
406 1961848 : scan->rs_strategy = NULL;
407 : }
408 :
409 1986332 : if (scan->rs_base.rs_parallel != NULL)
410 : {
411 : /* For parallel scan, believe whatever ParallelTableScanDesc says. */
412 4056 : if (scan->rs_base.rs_parallel->phs_syncscan)
413 4 : scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
414 : else
415 4052 : scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
416 : }
417 1982276 : else if (keep_startblock)
418 : {
419 : /*
420 : * When rescanning, we want to keep the previous startblock setting,
421 : * so that rewinding a cursor doesn't generate surprising results.
422 : * Reset the active syncscan setting, though.
423 : */
424 1218198 : if (allow_sync && synchronize_seqscans)
425 100 : scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
426 : else
427 1218098 : scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
428 : }
429 764078 : else if (allow_sync && synchronize_seqscans)
430 : {
431 144 : scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
432 144 : scan->rs_startblock = ss_get_location(scan->rs_base.rs_rd, scan->rs_nblocks);
433 : }
434 : else
435 : {
436 763934 : scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
437 763934 : scan->rs_startblock = 0;
438 : }
439 :
440 1986332 : scan->rs_numblocks = InvalidBlockNumber;
441 1986332 : scan->rs_inited = false;
442 1986332 : scan->rs_ctup.t_data = NULL;
443 1986332 : ItemPointerSetInvalid(&scan->rs_ctup.t_self);
444 1986332 : scan->rs_cbuf = InvalidBuffer;
445 1986332 : scan->rs_cblock = InvalidBlockNumber;
446 1986332 : scan->rs_ntuples = 0;
447 1986332 : scan->rs_cindex = 0;
448 :
449 : /*
450 : * Initialize to ForwardScanDirection because it is most common and
451 : * because heap scans go forward before going backward (e.g. CURSORs).
452 : */
453 1986332 : scan->rs_dir = ForwardScanDirection;
454 1986332 : scan->rs_prefetch_block = InvalidBlockNumber;
455 :
456 : /* page-at-a-time fields are always invalid when not rs_inited */
457 :
458 : /*
459 : * copy the scan key, if appropriate
460 : */
461 1986332 : if (key != NULL && scan->rs_base.rs_nkeys > 0)
462 440312 : memcpy(scan->rs_base.rs_key, key, scan->rs_base.rs_nkeys * sizeof(ScanKeyData));
463 :
464 : /*
465 : * Currently, we only have a stats counter for sequential heap scans (but
466 : * e.g for bitmap scans the underlying bitmap index scans will be counted,
467 : * and for sample scans we update stats for tuple fetches).
468 : */
469 1986332 : if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN)
470 1945648 : pgstat_count_heap_scan(scan->rs_base.rs_rd);
471 1986332 : }
472 :
473 : /*
474 : * heap_setscanlimits - restrict range of a heapscan
475 : *
476 : * startBlk is the page to start at
477 : * numBlks is number of pages to scan (InvalidBlockNumber means "all")
478 : */
479 : void
480 5590 : heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
481 : {
482 5590 : HeapScanDesc scan = (HeapScanDesc) sscan;
483 :
484 : Assert(!scan->rs_inited); /* else too late to change */
485 : /* else rs_startblock is significant */
486 : Assert(!(scan->rs_base.rs_flags & SO_ALLOW_SYNC));
487 :
488 : /* Check startBlk is valid (but allow case of zero blocks...) */
489 : Assert(startBlk == 0 || startBlk < scan->rs_nblocks);
490 :
491 5590 : scan->rs_startblock = startBlk;
492 5590 : scan->rs_numblocks = numBlks;
493 5590 : }
494 :
495 : /*
496 : * Per-tuple loop for heap_prepare_pagescan(). Pulled out so it can be called
497 : * multiple times, with constant arguments for all_visible,
498 : * check_serializable.
499 : */
500 : pg_attribute_always_inline
501 : static int
502 5531754 : page_collect_tuples(HeapScanDesc scan, Snapshot snapshot,
503 : Page page, Buffer buffer,
504 : BlockNumber block, int lines,
505 : bool all_visible, bool check_serializable)
506 : {
507 5531754 : int ntup = 0;
508 : OffsetNumber lineoff;
509 :
510 278713926 : for (lineoff = FirstOffsetNumber; lineoff <= lines; lineoff++)
511 : {
512 273182188 : ItemId lpp = PageGetItemId(page, lineoff);
513 : HeapTupleData loctup;
514 : bool valid;
515 :
516 273182188 : if (!ItemIdIsNormal(lpp))
517 56355152 : continue;
518 :
519 216827036 : loctup.t_data = (HeapTupleHeader) PageGetItem(page, lpp);
520 216827036 : loctup.t_len = ItemIdGetLength(lpp);
521 216827036 : loctup.t_tableOid = RelationGetRelid(scan->rs_base.rs_rd);
522 216827036 : ItemPointerSet(&(loctup.t_self), block, lineoff);
523 :
524 216827036 : if (all_visible)
525 86460132 : valid = true;
526 : else
527 130366904 : valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
528 :
529 216827036 : if (check_serializable)
530 2818 : HeapCheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
531 : &loctup, buffer, snapshot);
532 :
533 216827020 : if (valid)
534 : {
535 200800256 : scan->rs_vistuples[ntup] = lineoff;
536 200800256 : ntup++;
537 : }
538 : }
539 :
540 : Assert(ntup <= MaxHeapTuplesPerPage);
541 :
542 5531738 : return ntup;
543 : }
544 :
545 : /*
546 : * heap_prepare_pagescan - Prepare current scan page to be scanned in pagemode
547 : *
548 : * Preparation currently consists of 1. prune the scan's rs_cbuf page, and 2.
549 : * fill the rs_vistuples[] array with the OffsetNumbers of visible tuples.
550 : */
551 : void
552 5531754 : heap_prepare_pagescan(TableScanDesc sscan)
553 : {
554 5531754 : HeapScanDesc scan = (HeapScanDesc) sscan;
555 5531754 : Buffer buffer = scan->rs_cbuf;
556 5531754 : BlockNumber block = scan->rs_cblock;
557 : Snapshot snapshot;
558 : Page page;
559 : int lines;
560 : bool all_visible;
561 : bool check_serializable;
562 :
563 : Assert(BufferGetBlockNumber(buffer) == block);
564 :
565 : /* ensure we're not accidentally being used when not in pagemode */
566 : Assert(scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE);
567 5531754 : snapshot = scan->rs_base.rs_snapshot;
568 :
569 : /*
570 : * Prune and repair fragmentation for the whole page, if possible.
571 : */
572 5531754 : heap_page_prune_opt(scan->rs_base.rs_rd, buffer);
573 :
574 : /*
575 : * We must hold share lock on the buffer content while examining tuple
576 : * visibility. Afterwards, however, the tuples we have found to be
577 : * visible are guaranteed good as long as we hold the buffer pin.
578 : */
579 5531754 : LockBuffer(buffer, BUFFER_LOCK_SHARE);
580 :
581 5531754 : page = BufferGetPage(buffer);
582 5531754 : lines = PageGetMaxOffsetNumber(page);
583 :
584 : /*
585 : * If the all-visible flag indicates that all tuples on the page are
586 : * visible to everyone, we can skip the per-tuple visibility tests.
587 : *
588 : * Note: In hot standby, a tuple that's already visible to all
589 : * transactions on the primary might still be invisible to a read-only
590 : * transaction in the standby. We partly handle this problem by tracking
591 : * the minimum xmin of visible tuples as the cut-off XID while marking a
592 : * page all-visible on the primary and WAL log that along with the
593 : * visibility map SET operation. In hot standby, we wait for (or abort)
594 : * all transactions that can potentially may not see one or more tuples on
595 : * the page. That's how index-only scans work fine in hot standby. A
596 : * crucial difference between index-only scans and heap scans is that the
597 : * index-only scan completely relies on the visibility map where as heap
598 : * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
599 : * the page-level flag can be trusted in the same way, because it might
600 : * get propagated somehow without being explicitly WAL-logged, e.g. via a
601 : * full page write. Until we can prove that beyond doubt, let's check each
602 : * tuple for visibility the hard way.
603 : */
604 5531754 : all_visible = PageIsAllVisible(page) && !snapshot->takenDuringRecovery;
605 : check_serializable =
606 5531754 : CheckForSerializableConflictOutNeeded(scan->rs_base.rs_rd, snapshot);
607 :
608 : /*
609 : * We call page_collect_tuples() with constant arguments, to get the
610 : * compiler to constant fold the constant arguments. Separate calls with
611 : * constant arguments, rather than variables, are needed on several
612 : * compilers to actually perform constant folding.
613 : */
614 5531754 : if (likely(all_visible))
615 : {
616 2035240 : if (likely(!check_serializable))
617 2035240 : scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
618 : block, lines, true, false);
619 : else
620 0 : scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
621 : block, lines, true, true);
622 : }
623 : else
624 : {
625 3496514 : if (likely(!check_serializable))
626 3495268 : scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
627 : block, lines, false, false);
628 : else
629 1246 : scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
630 : block, lines, false, true);
631 : }
632 :
633 5531738 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
634 5531738 : }
635 :
636 : /*
637 : * heap_fetch_next_buffer - read and pin the next block from MAIN_FORKNUM.
638 : *
639 : * Read the next block of the scan relation from the read stream and save it
640 : * in the scan descriptor. It is already pinned.
641 : */
642 : static inline void
643 7332326 : heap_fetch_next_buffer(HeapScanDesc scan, ScanDirection dir)
644 : {
645 : Assert(scan->rs_read_stream);
646 :
647 : /* release previous scan buffer, if any */
648 7332326 : if (BufferIsValid(scan->rs_cbuf))
649 : {
650 5386072 : ReleaseBuffer(scan->rs_cbuf);
651 5386072 : scan->rs_cbuf = InvalidBuffer;
652 : }
653 :
654 : /*
655 : * Be sure to check for interrupts at least once per page. Checks at
656 : * higher code levels won't be able to stop a seqscan that encounters many
657 : * pages' worth of consecutive dead tuples.
658 : */
659 7332326 : CHECK_FOR_INTERRUPTS();
660 :
661 : /*
662 : * If the scan direction is changing, reset the prefetch block to the
663 : * current block. Otherwise, we will incorrectly prefetch the blocks
664 : * between the prefetch block and the current block again before
665 : * prefetching blocks in the new, correct scan direction.
666 : */
667 7332322 : if (unlikely(scan->rs_dir != dir))
668 : {
669 154 : scan->rs_prefetch_block = scan->rs_cblock;
670 154 : read_stream_reset(scan->rs_read_stream);
671 : }
672 :
673 7332322 : scan->rs_dir = dir;
674 :
675 7332322 : scan->rs_cbuf = read_stream_next_buffer(scan->rs_read_stream, NULL);
676 7332272 : if (BufferIsValid(scan->rs_cbuf))
677 5712810 : scan->rs_cblock = BufferGetBlockNumber(scan->rs_cbuf);
678 7332272 : }
679 :
680 : /*
681 : * heapgettup_initial_block - return the first BlockNumber to scan
682 : *
683 : * Returns InvalidBlockNumber when there are no blocks to scan. This can
684 : * occur with empty tables and in parallel scans when parallel workers get all
685 : * of the pages before we can get a chance to get our first page.
686 : */
687 : static pg_noinline BlockNumber
688 1943268 : heapgettup_initial_block(HeapScanDesc scan, ScanDirection dir)
689 : {
690 : Assert(!scan->rs_inited);
691 : Assert(scan->rs_base.rs_parallel == NULL);
692 :
693 : /* When there are no pages to scan, return InvalidBlockNumber */
694 1943268 : if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
695 939534 : return InvalidBlockNumber;
696 :
697 1003734 : if (ScanDirectionIsForward(dir))
698 : {
699 1003670 : return scan->rs_startblock;
700 : }
701 : else
702 : {
703 : /*
704 : * Disable reporting to syncscan logic in a backwards scan; it's not
705 : * very likely anyone else is doing the same thing at the same time,
706 : * and much more likely that we'll just bollix things for forward
707 : * scanners.
708 : */
709 64 : scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
710 :
711 : /*
712 : * Start from last page of the scan. Ensure we take into account
713 : * rs_numblocks if it's been adjusted by heap_setscanlimits().
714 : */
715 64 : if (scan->rs_numblocks != InvalidBlockNumber)
716 6 : return (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
717 :
718 58 : if (scan->rs_startblock > 0)
719 0 : return scan->rs_startblock - 1;
720 :
721 58 : return scan->rs_nblocks - 1;
722 : }
723 : }
724 :
725 :
726 : /*
727 : * heapgettup_start_page - helper function for heapgettup()
728 : *
729 : * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
730 : * to the number of tuples on this page. Also set *lineoff to the first
731 : * offset to scan with forward scans getting the first offset and backward
732 : * getting the final offset on the page.
733 : */
734 : static Page
735 189610 : heapgettup_start_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
736 : OffsetNumber *lineoff)
737 : {
738 : Page page;
739 :
740 : Assert(scan->rs_inited);
741 : Assert(BufferIsValid(scan->rs_cbuf));
742 :
743 : /* Caller is responsible for ensuring buffer is locked if needed */
744 189610 : page = BufferGetPage(scan->rs_cbuf);
745 :
746 189610 : *linesleft = PageGetMaxOffsetNumber(page) - FirstOffsetNumber + 1;
747 :
748 189610 : if (ScanDirectionIsForward(dir))
749 189610 : *lineoff = FirstOffsetNumber;
750 : else
751 0 : *lineoff = (OffsetNumber) (*linesleft);
752 :
753 : /* lineoff now references the physically previous or next tid */
754 189610 : return page;
755 : }
756 :
757 :
758 : /*
759 : * heapgettup_continue_page - helper function for heapgettup()
760 : *
761 : * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
762 : * to the number of tuples left to scan on this page. Also set *lineoff to
763 : * the next offset to scan according to the ScanDirection in 'dir'.
764 : */
765 : static inline Page
766 15682000 : heapgettup_continue_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
767 : OffsetNumber *lineoff)
768 : {
769 : Page page;
770 :
771 : Assert(scan->rs_inited);
772 : Assert(BufferIsValid(scan->rs_cbuf));
773 :
774 : /* Caller is responsible for ensuring buffer is locked if needed */
775 15682000 : page = BufferGetPage(scan->rs_cbuf);
776 :
777 15682000 : if (ScanDirectionIsForward(dir))
778 : {
779 15682000 : *lineoff = OffsetNumberNext(scan->rs_coffset);
780 15682000 : *linesleft = PageGetMaxOffsetNumber(page) - (*lineoff) + 1;
781 : }
782 : else
783 : {
784 : /*
785 : * The previous returned tuple may have been vacuumed since the
786 : * previous scan when we use a non-MVCC snapshot, so we must
787 : * re-establish the lineoff <= PageGetMaxOffsetNumber(page) invariant
788 : */
789 0 : *lineoff = Min(PageGetMaxOffsetNumber(page), OffsetNumberPrev(scan->rs_coffset));
790 0 : *linesleft = *lineoff;
791 : }
792 :
793 : /* lineoff now references the physically previous or next tid */
794 15682000 : return page;
795 : }
796 :
797 : /*
798 : * heapgettup_advance_block - helper for heap_fetch_next_buffer()
799 : *
800 : * Given the current block number, the scan direction, and various information
801 : * contained in the scan descriptor, calculate the BlockNumber to scan next
802 : * and return it. If there are no further blocks to scan, return
803 : * InvalidBlockNumber to indicate this fact to the caller.
804 : *
805 : * This should not be called to determine the initial block number -- only for
806 : * subsequent blocks.
807 : *
808 : * This also adjusts rs_numblocks when a limit has been imposed by
809 : * heap_setscanlimits().
810 : */
811 : static inline BlockNumber
812 5556470 : heapgettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection dir)
813 : {
814 : Assert(scan->rs_base.rs_parallel == NULL);
815 :
816 5556470 : if (likely(ScanDirectionIsForward(dir)))
817 : {
818 5556352 : block++;
819 :
820 : /* wrap back to the start of the heap */
821 5556352 : if (block >= scan->rs_nblocks)
822 801354 : block = 0;
823 :
824 : /*
825 : * Report our new scan position for synchronization purposes. We don't
826 : * do that when moving backwards, however. That would just mess up any
827 : * other forward-moving scanners.
828 : *
829 : * Note: we do this before checking for end of scan so that the final
830 : * state of the position hint is back at the start of the rel. That's
831 : * not strictly necessary, but otherwise when you run the same query
832 : * multiple times the starting position would shift a little bit
833 : * backwards on every invocation, which is confusing. We don't
834 : * guarantee any specific ordering in general, though.
835 : */
836 5556352 : if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
837 22530 : ss_report_location(scan->rs_base.rs_rd, block);
838 :
839 : /* we're done if we're back at where we started */
840 5556352 : if (block == scan->rs_startblock)
841 801272 : return InvalidBlockNumber;
842 :
843 : /* check if the limit imposed by heap_setscanlimits() is met */
844 4755080 : if (scan->rs_numblocks != InvalidBlockNumber)
845 : {
846 4932 : if (--scan->rs_numblocks == 0)
847 3056 : return InvalidBlockNumber;
848 : }
849 :
850 4752024 : return block;
851 : }
852 : else
853 : {
854 : /* we're done if the last block is the start position */
855 118 : if (block == scan->rs_startblock)
856 118 : return InvalidBlockNumber;
857 :
858 : /* check if the limit imposed by heap_setscanlimits() is met */
859 0 : if (scan->rs_numblocks != InvalidBlockNumber)
860 : {
861 0 : if (--scan->rs_numblocks == 0)
862 0 : return InvalidBlockNumber;
863 : }
864 :
865 : /* wrap to the end of the heap when the last page was page 0 */
866 0 : if (block == 0)
867 0 : block = scan->rs_nblocks;
868 :
869 0 : block--;
870 :
871 0 : return block;
872 : }
873 : }
874 :
875 : /* ----------------
876 : * heapgettup - fetch next heap tuple
877 : *
878 : * Initialize the scan if not already done; then advance to the next
879 : * tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
880 : * or set scan->rs_ctup.t_data = NULL if no more tuples.
881 : *
882 : * Note: the reason nkeys/key are passed separately, even though they are
883 : * kept in the scan descriptor, is that the caller may not want us to check
884 : * the scankeys.
885 : *
886 : * Note: when we fall off the end of the scan in either direction, we
887 : * reset rs_inited. This means that a further request with the same
888 : * scan direction will restart the scan, which is a bit odd, but a
889 : * request with the opposite scan direction will start a fresh scan
890 : * in the proper direction. The latter is required behavior for cursors,
891 : * while the former case is generally undefined behavior in Postgres
892 : * so we don't care too much.
893 : * ----------------
894 : */
895 : static void
896 15722818 : heapgettup(HeapScanDesc scan,
897 : ScanDirection dir,
898 : int nkeys,
899 : ScanKey key)
900 : {
901 15722818 : HeapTuple tuple = &(scan->rs_ctup);
902 : Page page;
903 : OffsetNumber lineoff;
904 : int linesleft;
905 :
906 15722818 : if (likely(scan->rs_inited))
907 : {
908 : /* continue from previously returned page/tuple */
909 15682000 : LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
910 15682000 : page = heapgettup_continue_page(scan, dir, &linesleft, &lineoff);
911 15682000 : goto continue_page;
912 : }
913 :
914 : /*
915 : * advance the scan until we find a qualifying tuple or run out of stuff
916 : * to scan
917 : */
918 : while (true)
919 : {
920 230132 : heap_fetch_next_buffer(scan, dir);
921 :
922 : /* did we run out of blocks to scan? */
923 230132 : if (!BufferIsValid(scan->rs_cbuf))
924 40522 : break;
925 :
926 : Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);
927 :
928 189610 : LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
929 189610 : page = heapgettup_start_page(scan, dir, &linesleft, &lineoff);
930 15871610 : continue_page:
931 :
932 : /*
933 : * Only continue scanning the page while we have lines left.
934 : *
935 : * Note that this protects us from accessing line pointers past
936 : * PageGetMaxOffsetNumber(); both for forward scans when we resume the
937 : * table scan, and for when we start scanning a new page.
938 : */
939 15954966 : for (; linesleft > 0; linesleft--, lineoff += dir)
940 : {
941 : bool visible;
942 15765652 : ItemId lpp = PageGetItemId(page, lineoff);
943 :
944 15765652 : if (!ItemIdIsNormal(lpp))
945 72920 : continue;
946 :
947 15692732 : tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
948 15692732 : tuple->t_len = ItemIdGetLength(lpp);
949 15692732 : ItemPointerSet(&(tuple->t_self), scan->rs_cblock, lineoff);
950 :
951 15692732 : visible = HeapTupleSatisfiesVisibility(tuple,
952 : scan->rs_base.rs_snapshot,
953 : scan->rs_cbuf);
954 :
955 15692732 : HeapCheckForSerializableConflictOut(visible, scan->rs_base.rs_rd,
956 : tuple, scan->rs_cbuf,
957 : scan->rs_base.rs_snapshot);
958 :
959 : /* skip tuples not visible to this snapshot */
960 15692732 : if (!visible)
961 10436 : continue;
962 :
963 : /* skip any tuples that don't match the scan key */
964 15682296 : if (key != NULL &&
965 0 : !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
966 : nkeys, key))
967 0 : continue;
968 :
969 15682296 : LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
970 15682296 : scan->rs_coffset = lineoff;
971 15682296 : return;
972 : }
973 :
974 : /*
975 : * if we get here, it means we've exhausted the items on this page and
976 : * it's time to move to the next.
977 : */
978 189314 : LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
979 : }
980 :
981 : /* end of scan */
982 40522 : if (BufferIsValid(scan->rs_cbuf))
983 0 : ReleaseBuffer(scan->rs_cbuf);
984 :
985 40522 : scan->rs_cbuf = InvalidBuffer;
986 40522 : scan->rs_cblock = InvalidBlockNumber;
987 40522 : scan->rs_prefetch_block = InvalidBlockNumber;
988 40522 : tuple->t_data = NULL;
989 40522 : scan->rs_inited = false;
990 : }
991 :
992 : /* ----------------
993 : * heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
994 : *
995 : * Same API as heapgettup, but used in page-at-a-time mode
996 : *
997 : * The internal logic is much the same as heapgettup's too, but there are some
998 : * differences: we do not take the buffer content lock (that only needs to
999 : * happen inside heap_prepare_pagescan), and we iterate through just the
1000 : * tuples listed in rs_vistuples[] rather than all tuples on the page. Notice
1001 : * that lineindex is 0-based, where the corresponding loop variable lineoff in
1002 : * heapgettup is 1-based.
1003 : * ----------------
1004 : */
1005 : static void
1006 99668572 : heapgettup_pagemode(HeapScanDesc scan,
1007 : ScanDirection dir,
1008 : int nkeys,
1009 : ScanKey key)
1010 : {
1011 99668572 : HeapTuple tuple = &(scan->rs_ctup);
1012 : Page page;
1013 : uint32 lineindex;
1014 : uint32 linesleft;
1015 :
1016 99668572 : if (likely(scan->rs_inited))
1017 : {
1018 : /* continue from previously returned page/tuple */
1019 97763136 : page = BufferGetPage(scan->rs_cbuf);
1020 :
1021 97763136 : lineindex = scan->rs_cindex + dir;
1022 97763136 : if (ScanDirectionIsForward(dir))
1023 97762478 : linesleft = scan->rs_ntuples - lineindex;
1024 : else
1025 658 : linesleft = scan->rs_cindex;
1026 : /* lineindex now references the next or previous visible tid */
1027 :
1028 97763136 : goto continue_page;
1029 : }
1030 :
1031 : /*
1032 : * advance the scan until we find a qualifying tuple or run out of stuff
1033 : * to scan
1034 : */
1035 : while (true)
1036 : {
1037 7102194 : heap_fetch_next_buffer(scan, dir);
1038 :
1039 : /* did we run out of blocks to scan? */
1040 7102140 : if (!BufferIsValid(scan->rs_cbuf))
1041 1578940 : break;
1042 :
1043 : Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);
1044 :
1045 : /* prune the page and determine visible tuple offsets */
1046 5523200 : heap_prepare_pagescan((TableScanDesc) scan);
1047 5523184 : page = BufferGetPage(scan->rs_cbuf);
1048 5523184 : linesleft = scan->rs_ntuples;
1049 5523184 : lineindex = ScanDirectionIsForward(dir) ? 0 : linesleft - 1;
1050 :
1051 : /* block is the same for all tuples, set it once outside the loop */
1052 5523184 : ItemPointerSetBlockNumber(&tuple->t_self, scan->rs_cblock);
1053 :
1054 : /* lineindex now references the next or previous visible tid */
1055 103286320 : continue_page:
1056 :
1057 199612174 : for (; linesleft > 0; linesleft--, lineindex += dir)
1058 : {
1059 : ItemId lpp;
1060 : OffsetNumber lineoff;
1061 :
1062 : Assert(lineindex <= scan->rs_ntuples);
1063 194415416 : lineoff = scan->rs_vistuples[lineindex];
1064 194415416 : lpp = PageGetItemId(page, lineoff);
1065 : Assert(ItemIdIsNormal(lpp));
1066 :
1067 194415416 : tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
1068 194415416 : tuple->t_len = ItemIdGetLength(lpp);
1069 194415416 : ItemPointerSetOffsetNumber(&tuple->t_self, lineoff);
1070 :
1071 : /* skip any tuples that don't match the scan key */
1072 194415416 : if (key != NULL &&
1073 97135120 : !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
1074 : nkeys, key))
1075 96325854 : continue;
1076 :
1077 98089562 : scan->rs_cindex = lineindex;
1078 98089562 : return;
1079 : }
1080 : }
1081 :
1082 : /* end of scan */
1083 1578940 : if (BufferIsValid(scan->rs_cbuf))
1084 0 : ReleaseBuffer(scan->rs_cbuf);
1085 1578940 : scan->rs_cbuf = InvalidBuffer;
1086 1578940 : scan->rs_cblock = InvalidBlockNumber;
1087 1578940 : scan->rs_prefetch_block = InvalidBlockNumber;
1088 1578940 : tuple->t_data = NULL;
1089 1578940 : scan->rs_inited = false;
1090 : }
1091 :
1092 :
1093 : /* ----------------------------------------------------------------
1094 : * heap access method interface
1095 : * ----------------------------------------------------------------
1096 : */
1097 :
1098 :
1099 : TableScanDesc
1100 768030 : heap_beginscan(Relation relation, Snapshot snapshot,
1101 : int nkeys, ScanKey key,
1102 : ParallelTableScanDesc parallel_scan,
1103 : uint32 flags)
1104 : {
1105 : HeapScanDesc scan;
1106 :
1107 : /*
1108 : * increment relation ref count while scanning relation
1109 : *
1110 : * This is just to make really sure the relcache entry won't go away while
1111 : * the scan has a pointer to it. Caller should be holding the rel open
1112 : * anyway, so this is redundant in all normal scenarios...
1113 : */
1114 768030 : RelationIncrementReferenceCount(relation);
1115 :
1116 : /*
1117 : * allocate and initialize scan descriptor
1118 : */
1119 768030 : if (flags & SO_TYPE_BITMAPSCAN)
1120 : {
1121 16198 : BitmapHeapScanDesc bscan = palloc(sizeof(BitmapHeapScanDescData));
1122 :
1123 : /*
1124 : * Bitmap Heap scans do not have any fields that a normal Heap Scan
1125 : * does not have, so no special initializations required here.
1126 : */
1127 16198 : scan = (HeapScanDesc) bscan;
1128 : }
1129 : else
1130 751832 : scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1131 :
1132 768030 : scan->rs_base.rs_rd = relation;
1133 768030 : scan->rs_base.rs_snapshot = snapshot;
1134 768030 : scan->rs_base.rs_nkeys = nkeys;
1135 768030 : scan->rs_base.rs_flags = flags;
1136 768030 : scan->rs_base.rs_parallel = parallel_scan;
1137 768030 : scan->rs_strategy = NULL; /* set in initscan */
1138 768030 : scan->rs_cbuf = InvalidBuffer;
1139 :
1140 : /*
1141 : * Disable page-at-a-time mode if it's not a MVCC-safe snapshot.
1142 : */
1143 768030 : if (!(snapshot && IsMVCCSnapshot(snapshot)))
1144 58208 : scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
1145 :
1146 : /* Check that a historic snapshot is not used for non-catalog tables */
1147 768030 : if (snapshot &&
1148 750644 : IsHistoricMVCCSnapshot(snapshot) &&
1149 1320 : !RelationIsAccessibleInLogicalDecoding(relation))
1150 : {
1151 0 : ereport(ERROR,
1152 : (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
1153 : errmsg("cannot query non-catalog table \"%s\" during logical decoding",
1154 : RelationGetRelationName(relation))));
1155 : }
1156 :
1157 : /*
1158 : * For seqscan and sample scans in a serializable transaction, acquire a
1159 : * predicate lock on the entire relation. This is required not only to
1160 : * lock all the matching tuples, but also to conflict with new insertions
1161 : * into the table. In an indexscan, we take page locks on the index pages
1162 : * covering the range specified in the scan qual, but in a heap scan there
1163 : * is nothing more fine-grained to lock. A bitmap scan is a different
1164 : * story, there we have already scanned the index and locked the index
1165 : * pages covering the predicate. But in that case we still have to lock
1166 : * any matching heap tuples. For sample scan we could optimize the locking
1167 : * to be at least page-level granularity, but we'd need to add per-tuple
1168 : * locking for that.
1169 : */
1170 768030 : if (scan->rs_base.rs_flags & (SO_TYPE_SEQSCAN | SO_TYPE_SAMPLESCAN))
1171 : {
1172 : /*
1173 : * Ensure a missing snapshot is noticed reliably, even if the
1174 : * isolation mode means predicate locking isn't performed (and
1175 : * therefore the snapshot isn't used here).
1176 : */
1177 : Assert(snapshot);
1178 731806 : PredicateLockRelation(relation, snapshot);
1179 : }
1180 :
1181 : /* we only need to set this up once */
1182 768030 : scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1183 :
1184 : /*
1185 : * Allocate memory to keep track of page allocation for parallel workers
1186 : * when doing a parallel scan.
1187 : */
1188 768030 : if (parallel_scan != NULL)
1189 3948 : scan->rs_parallelworkerdata = palloc(sizeof(ParallelBlockTableScanWorkerData));
1190 : else
1191 764082 : scan->rs_parallelworkerdata = NULL;
1192 :
1193 : /*
1194 : * we do this here instead of in initscan() because heap_rescan also calls
1195 : * initscan() and we don't want to allocate memory again
1196 : */
1197 768030 : if (nkeys > 0)
1198 440312 : scan->rs_base.rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1199 : else
1200 327718 : scan->rs_base.rs_key = NULL;
1201 :
1202 768030 : initscan(scan, key, false);
1203 :
1204 768026 : scan->rs_read_stream = NULL;
1205 :
1206 : /*
1207 : * Set up a read stream for sequential scans and TID range scans. This
1208 : * should be done after initscan() because initscan() allocates the
1209 : * BufferAccessStrategy object passed to the read stream API.
1210 : */
1211 768026 : if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN ||
1212 36370 : scan->rs_base.rs_flags & SO_TYPE_TIDRANGESCAN)
1213 733510 : {
1214 : ReadStreamBlockNumberCB cb;
1215 :
1216 733510 : if (scan->rs_base.rs_parallel)
1217 3948 : cb = heap_scan_stream_read_next_parallel;
1218 : else
1219 729562 : cb = heap_scan_stream_read_next_serial;
1220 :
1221 : /* ---
1222 : * It is safe to use batchmode as the only locks taken by `cb`
1223 : * are never taken while waiting for IO:
1224 : * - SyncScanLock is used in the non-parallel case
1225 : * - in the parallel case, only spinlocks and atomics are used
1226 : * ---
1227 : */
1228 733510 : scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_SEQUENTIAL |
1229 : READ_STREAM_USE_BATCHING,
1230 : scan->rs_strategy,
1231 : scan->rs_base.rs_rd,
1232 : MAIN_FORKNUM,
1233 : cb,
1234 : scan,
1235 : 0);
1236 : }
1237 34516 : else if (scan->rs_base.rs_flags & SO_TYPE_BITMAPSCAN)
1238 : {
1239 16198 : scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_DEFAULT |
1240 : READ_STREAM_USE_BATCHING,
1241 : scan->rs_strategy,
1242 : scan->rs_base.rs_rd,
1243 : MAIN_FORKNUM,
1244 : bitmapheap_stream_read_next,
1245 : scan,
1246 : sizeof(TBMIterateResult));
1247 : }
1248 :
1249 :
1250 768026 : return (TableScanDesc) scan;
1251 : }
1252 :
1253 : void
1254 1218306 : heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
1255 : bool allow_strat, bool allow_sync, bool allow_pagemode)
1256 : {
1257 1218306 : HeapScanDesc scan = (HeapScanDesc) sscan;
1258 :
1259 1218306 : if (set_params)
1260 : {
1261 30 : if (allow_strat)
1262 30 : scan->rs_base.rs_flags |= SO_ALLOW_STRAT;
1263 : else
1264 0 : scan->rs_base.rs_flags &= ~SO_ALLOW_STRAT;
1265 :
1266 30 : if (allow_sync)
1267 12 : scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
1268 : else
1269 18 : scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
1270 :
1271 30 : if (allow_pagemode && scan->rs_base.rs_snapshot &&
1272 30 : IsMVCCSnapshot(scan->rs_base.rs_snapshot))
1273 30 : scan->rs_base.rs_flags |= SO_ALLOW_PAGEMODE;
1274 : else
1275 0 : scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
1276 : }
1277 :
1278 : /*
1279 : * unpin scan buffers
1280 : */
1281 1218306 : if (BufferIsValid(scan->rs_cbuf))
1282 : {
1283 3266 : ReleaseBuffer(scan->rs_cbuf);
1284 3266 : scan->rs_cbuf = InvalidBuffer;
1285 : }
1286 :
1287 : /*
1288 : * SO_TYPE_BITMAPSCAN would be cleaned up here, but it does not hold any
1289 : * additional data vs a normal HeapScan
1290 : */
1291 :
1292 : /*
1293 : * The read stream is reset on rescan. This must be done before
1294 : * initscan(), as some state referred to by read_stream_reset() is reset
1295 : * in initscan().
1296 : */
1297 1218306 : if (scan->rs_read_stream)
1298 1218270 : read_stream_reset(scan->rs_read_stream);
1299 :
1300 : /*
1301 : * reinitialize scan descriptor
1302 : */
1303 1218306 : initscan(scan, key, true);
1304 1218306 : }
1305 :
1306 : void
1307 763290 : heap_endscan(TableScanDesc sscan)
1308 : {
1309 763290 : HeapScanDesc scan = (HeapScanDesc) sscan;
1310 :
1311 : /* Note: no locking manipulations needed */
1312 :
1313 : /*
1314 : * unpin scan buffers
1315 : */
1316 763290 : if (BufferIsValid(scan->rs_cbuf))
1317 320212 : ReleaseBuffer(scan->rs_cbuf);
1318 :
1319 : /*
1320 : * Must free the read stream before freeing the BufferAccessStrategy.
1321 : */
1322 763290 : if (scan->rs_read_stream)
1323 745078 : read_stream_end(scan->rs_read_stream);
1324 :
1325 : /*
1326 : * decrement relation reference count and free scan descriptor storage
1327 : */
1328 763290 : RelationDecrementReferenceCount(scan->rs_base.rs_rd);
1329 :
1330 763290 : if (scan->rs_base.rs_key)
1331 440254 : pfree(scan->rs_base.rs_key);
1332 :
1333 763290 : if (scan->rs_strategy != NULL)
1334 24098 : FreeAccessStrategy(scan->rs_strategy);
1335 :
1336 763290 : if (scan->rs_parallelworkerdata != NULL)
1337 3948 : pfree(scan->rs_parallelworkerdata);
1338 :
1339 763290 : if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
1340 78946 : UnregisterSnapshot(scan->rs_base.rs_snapshot);
1341 :
1342 763290 : pfree(scan);
1343 763290 : }
1344 :
1345 : HeapTuple
1346 19875966 : heap_getnext(TableScanDesc sscan, ScanDirection direction)
1347 : {
1348 19875966 : HeapScanDesc scan = (HeapScanDesc) sscan;
1349 :
1350 : /*
1351 : * This is still widely used directly, without going through table AM, so
1352 : * add a safety check. It's possible we should, at a later point,
1353 : * downgrade this to an assert. The reason for checking the AM routine,
1354 : * rather than the AM oid, is that this allows to write regression tests
1355 : * that create another AM reusing the heap handler.
1356 : */
1357 19875966 : if (unlikely(sscan->rs_rd->rd_tableam != GetHeapamTableAmRoutine()))
1358 0 : ereport(ERROR,
1359 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1360 : errmsg_internal("only heap AM is supported")));
1361 :
1362 : /*
1363 : * We don't expect direct calls to heap_getnext with valid CheckXidAlive
1364 : * for catalog or regular tables. See detailed comments in xact.c where
1365 : * these variables are declared. Normally we have such a check at tableam
1366 : * level API but this is called from many places so we need to ensure it
1367 : * here.
1368 : */
1369 19875966 : if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
1370 0 : elog(ERROR, "unexpected heap_getnext call during logical decoding");
1371 :
1372 : /* Note: no locking manipulations needed */
1373 :
1374 19875966 : if (scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
1375 5112758 : heapgettup_pagemode(scan, direction,
1376 5112758 : scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1377 : else
1378 14763208 : heapgettup(scan, direction,
1379 14763208 : scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1380 :
1381 19875966 : if (scan->rs_ctup.t_data == NULL)
1382 131274 : return NULL;
1383 :
1384 : /*
1385 : * if we get here it means we have a new current scan tuple, so point to
1386 : * the proper return buffer and return the tuple.
1387 : */
1388 :
1389 19744692 : pgstat_count_heap_getnext(scan->rs_base.rs_rd);
1390 :
1391 19744692 : return &scan->rs_ctup;
1392 : }
1393 :
1394 : bool
1395 95506636 : heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
1396 : {
1397 95506636 : HeapScanDesc scan = (HeapScanDesc) sscan;
1398 :
1399 : /* Note: no locking manipulations needed */
1400 :
1401 95506636 : if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1402 94547026 : heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1403 : else
1404 959610 : heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1405 :
1406 95506582 : if (scan->rs_ctup.t_data == NULL)
1407 : {
1408 1488094 : ExecClearTuple(slot);
1409 1488094 : return false;
1410 : }
1411 :
1412 : /*
1413 : * if we get here it means we have a new current scan tuple, so point to
1414 : * the proper return buffer and return the tuple.
1415 : */
1416 :
1417 94018488 : pgstat_count_heap_getnext(scan->rs_base.rs_rd);
1418 :
1419 94018488 : ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
1420 : scan->rs_cbuf);
1421 94018488 : return true;
1422 : }
1423 :
1424 : void
1425 1920 : heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
1426 : ItemPointer maxtid)
1427 : {
1428 1920 : HeapScanDesc scan = (HeapScanDesc) sscan;
1429 : BlockNumber startBlk;
1430 : BlockNumber numBlks;
1431 : ItemPointerData highestItem;
1432 : ItemPointerData lowestItem;
1433 :
1434 : /*
1435 : * For relations without any pages, we can simply leave the TID range
1436 : * unset. There will be no tuples to scan, therefore no tuples outside
1437 : * the given TID range.
1438 : */
1439 1920 : if (scan->rs_nblocks == 0)
1440 48 : return;
1441 :
1442 : /*
1443 : * Set up some ItemPointers which point to the first and last possible
1444 : * tuples in the heap.
1445 : */
1446 1908 : ItemPointerSet(&highestItem, scan->rs_nblocks - 1, MaxOffsetNumber);
1447 1908 : ItemPointerSet(&lowestItem, 0, FirstOffsetNumber);
1448 :
1449 : /*
1450 : * If the given maximum TID is below the highest possible TID in the
1451 : * relation, then restrict the range to that, otherwise we scan to the end
1452 : * of the relation.
1453 : */
1454 1908 : if (ItemPointerCompare(maxtid, &highestItem) < 0)
1455 140 : ItemPointerCopy(maxtid, &highestItem);
1456 :
1457 : /*
1458 : * If the given minimum TID is above the lowest possible TID in the
1459 : * relation, then restrict the range to only scan for TIDs above that.
1460 : */
1461 1908 : if (ItemPointerCompare(mintid, &lowestItem) > 0)
1462 1762 : ItemPointerCopy(mintid, &lowestItem);
1463 :
1464 : /*
1465 : * Check for an empty range and protect from would be negative results
1466 : * from the numBlks calculation below.
1467 : */
1468 1908 : if (ItemPointerCompare(&highestItem, &lowestItem) < 0)
1469 : {
1470 : /* Set an empty range of blocks to scan */
1471 36 : heap_setscanlimits(sscan, 0, 0);
1472 36 : return;
1473 : }
1474 :
1475 : /*
1476 : * Calculate the first block and the number of blocks we must scan. We
1477 : * could be more aggressive here and perform some more validation to try
1478 : * and further narrow the scope of blocks to scan by checking if the
1479 : * lowestItem has an offset above MaxOffsetNumber. In this case, we could
1480 : * advance startBlk by one. Likewise, if highestItem has an offset of 0
1481 : * we could scan one fewer blocks. However, such an optimization does not
1482 : * seem worth troubling over, currently.
1483 : */
1484 1872 : startBlk = ItemPointerGetBlockNumberNoCheck(&lowestItem);
1485 :
1486 1872 : numBlks = ItemPointerGetBlockNumberNoCheck(&highestItem) -
1487 1872 : ItemPointerGetBlockNumberNoCheck(&lowestItem) + 1;
1488 :
1489 : /* Set the start block and number of blocks to scan */
1490 1872 : heap_setscanlimits(sscan, startBlk, numBlks);
1491 :
1492 : /* Finally, set the TID range in sscan */
1493 1872 : ItemPointerCopy(&lowestItem, &sscan->st.tidrange.rs_mintid);
1494 1872 : ItemPointerCopy(&highestItem, &sscan->st.tidrange.rs_maxtid);
1495 : }
1496 :
1497 : bool
1498 8602 : heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
1499 : TupleTableSlot *slot)
1500 : {
1501 8602 : HeapScanDesc scan = (HeapScanDesc) sscan;
1502 8602 : ItemPointer mintid = &sscan->st.tidrange.rs_mintid;
1503 8602 : ItemPointer maxtid = &sscan->st.tidrange.rs_maxtid;
1504 :
1505 : /* Note: no locking manipulations needed */
1506 : for (;;)
1507 : {
1508 8788 : if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1509 8788 : heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1510 : else
1511 0 : heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1512 :
1513 8772 : if (scan->rs_ctup.t_data == NULL)
1514 : {
1515 94 : ExecClearTuple(slot);
1516 94 : return false;
1517 : }
1518 :
1519 : /*
1520 : * heap_set_tidrange will have used heap_setscanlimits to limit the
1521 : * range of pages we scan to only ones that can contain the TID range
1522 : * we're scanning for. Here we must filter out any tuples from these
1523 : * pages that are outside of that range.
1524 : */
1525 8678 : if (ItemPointerCompare(&scan->rs_ctup.t_self, mintid) < 0)
1526 : {
1527 186 : ExecClearTuple(slot);
1528 :
1529 : /*
1530 : * When scanning backwards, the TIDs will be in descending order.
1531 : * Future tuples in this direction will be lower still, so we can
1532 : * just return false to indicate there will be no more tuples.
1533 : */
1534 186 : if (ScanDirectionIsBackward(direction))
1535 0 : return false;
1536 :
1537 186 : continue;
1538 : }
1539 :
1540 : /*
1541 : * Likewise for the final page, we must filter out TIDs greater than
1542 : * maxtid.
1543 : */
1544 8492 : if (ItemPointerCompare(&scan->rs_ctup.t_self, maxtid) > 0)
1545 : {
1546 76 : ExecClearTuple(slot);
1547 :
1548 : /*
1549 : * When scanning forward, the TIDs will be in ascending order.
1550 : * Future tuples in this direction will be higher still, so we can
1551 : * just return false to indicate there will be no more tuples.
1552 : */
1553 76 : if (ScanDirectionIsForward(direction))
1554 76 : return false;
1555 0 : continue;
1556 : }
1557 :
1558 8416 : break;
1559 : }
1560 :
1561 : /*
1562 : * if we get here it means we have a new current scan tuple, so point to
1563 : * the proper return buffer and return the tuple.
1564 : */
1565 8416 : pgstat_count_heap_getnext(scan->rs_base.rs_rd);
1566 :
1567 8416 : ExecStoreBufferHeapTuple(&scan->rs_ctup, slot, scan->rs_cbuf);
1568 8416 : return true;
1569 : }
1570 :
1571 : /*
1572 : * heap_fetch - retrieve tuple with given tid
1573 : *
1574 : * On entry, tuple->t_self is the TID to fetch. We pin the buffer holding
1575 : * the tuple, fill in the remaining fields of *tuple, and check the tuple
1576 : * against the specified snapshot.
1577 : *
1578 : * If successful (tuple found and passes snapshot time qual), then *userbuf
1579 : * is set to the buffer holding the tuple and true is returned. The caller
1580 : * must unpin the buffer when done with the tuple.
1581 : *
1582 : * If the tuple is not found (ie, item number references a deleted slot),
1583 : * then tuple->t_data is set to NULL, *userbuf is set to InvalidBuffer,
1584 : * and false is returned.
1585 : *
1586 : * If the tuple is found but fails the time qual check, then the behavior
1587 : * depends on the keep_buf parameter. If keep_buf is false, the results
1588 : * are the same as for the tuple-not-found case. If keep_buf is true,
1589 : * then tuple->t_data and *userbuf are returned as for the success case,
1590 : * and again the caller must unpin the buffer; but false is returned.
1591 : *
1592 : * heap_fetch does not follow HOT chains: only the exact TID requested will
1593 : * be fetched.
1594 : *
1595 : * It is somewhat inconsistent that we ereport() on invalid block number but
1596 : * return false on invalid item number. There are a couple of reasons though.
1597 : * One is that the caller can relatively easily check the block number for
1598 : * validity, but cannot check the item number without reading the page
1599 : * himself. Another is that when we are following a t_ctid link, we can be
1600 : * reasonably confident that the page number is valid (since VACUUM shouldn't
1601 : * truncate off the destination page without having killed the referencing
1602 : * tuple first), but the item number might well not be good.
1603 : */
1604 : bool
1605 355682 : heap_fetch(Relation relation,
1606 : Snapshot snapshot,
1607 : HeapTuple tuple,
1608 : Buffer *userbuf,
1609 : bool keep_buf)
1610 : {
1611 355682 : ItemPointer tid = &(tuple->t_self);
1612 : ItemId lp;
1613 : Buffer buffer;
1614 : Page page;
1615 : OffsetNumber offnum;
1616 : bool valid;
1617 :
1618 : /*
1619 : * Fetch and pin the appropriate page of the relation.
1620 : */
1621 355682 : buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1622 :
1623 : /*
1624 : * Need share lock on buffer to examine tuple commit status.
1625 : */
1626 355666 : LockBuffer(buffer, BUFFER_LOCK_SHARE);
1627 355666 : page = BufferGetPage(buffer);
1628 :
1629 : /*
1630 : * We'd better check for out-of-range offnum in case of VACUUM since the
1631 : * TID was obtained.
1632 : */
1633 355666 : offnum = ItemPointerGetOffsetNumber(tid);
1634 355666 : if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1635 : {
1636 6 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1637 6 : ReleaseBuffer(buffer);
1638 6 : *userbuf = InvalidBuffer;
1639 6 : tuple->t_data = NULL;
1640 6 : return false;
1641 : }
1642 :
1643 : /*
1644 : * get the item line pointer corresponding to the requested tid
1645 : */
1646 355660 : lp = PageGetItemId(page, offnum);
1647 :
1648 : /*
1649 : * Must check for deleted tuple.
1650 : */
1651 355660 : if (!ItemIdIsNormal(lp))
1652 : {
1653 676 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1654 676 : ReleaseBuffer(buffer);
1655 676 : *userbuf = InvalidBuffer;
1656 676 : tuple->t_data = NULL;
1657 676 : return false;
1658 : }
1659 :
1660 : /*
1661 : * fill in *tuple fields
1662 : */
1663 354984 : tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1664 354984 : tuple->t_len = ItemIdGetLength(lp);
1665 354984 : tuple->t_tableOid = RelationGetRelid(relation);
1666 :
1667 : /*
1668 : * check tuple visibility, then release lock
1669 : */
1670 354984 : valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1671 :
1672 354984 : if (valid)
1673 354874 : PredicateLockTID(relation, &(tuple->t_self), snapshot,
1674 354874 : HeapTupleHeaderGetXmin(tuple->t_data));
1675 :
1676 354984 : HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1677 :
1678 354984 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1679 :
1680 354984 : if (valid)
1681 : {
1682 : /*
1683 : * All checks passed, so return the tuple as valid. Caller is now
1684 : * responsible for releasing the buffer.
1685 : */
1686 354874 : *userbuf = buffer;
1687 :
1688 354874 : return true;
1689 : }
1690 :
1691 : /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1692 110 : if (keep_buf)
1693 68 : *userbuf = buffer;
1694 : else
1695 : {
1696 42 : ReleaseBuffer(buffer);
1697 42 : *userbuf = InvalidBuffer;
1698 42 : tuple->t_data = NULL;
1699 : }
1700 :
1701 110 : return false;
1702 : }
1703 :
1704 : /*
1705 : * heap_hot_search_buffer - search HOT chain for tuple satisfying snapshot
1706 : *
1707 : * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
1708 : * of a HOT chain), and buffer is the buffer holding this tuple. We search
1709 : * for the first chain member satisfying the given snapshot. If one is
1710 : * found, we update *tid to reference that tuple's offset number, and
1711 : * return true. If no match, return false without modifying *tid.
1712 : *
1713 : * heapTuple is a caller-supplied buffer. When a match is found, we return
1714 : * the tuple here, in addition to updating *tid. If no match is found, the
1715 : * contents of this buffer on return are undefined.
1716 : *
1717 : * If all_dead is not NULL, we check non-visible tuples to see if they are
1718 : * globally dead; *all_dead is set true if all members of the HOT chain
1719 : * are vacuumable, false if not.
1720 : *
1721 : * Unlike heap_fetch, the caller must already have pin and (at least) share
1722 : * lock on the buffer; it is still pinned/locked at exit.
1723 : */
1724 : bool
1725 44761118 : heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
1726 : Snapshot snapshot, HeapTuple heapTuple,
1727 : bool *all_dead, bool first_call)
1728 : {
1729 44761118 : Page page = BufferGetPage(buffer);
1730 44761118 : TransactionId prev_xmax = InvalidTransactionId;
1731 : BlockNumber blkno;
1732 : OffsetNumber offnum;
1733 : bool at_chain_start;
1734 : bool valid;
1735 : bool skip;
1736 44761118 : GlobalVisState *vistest = NULL;
1737 :
1738 : /* If this is not the first call, previous call returned a (live!) tuple */
1739 44761118 : if (all_dead)
1740 38249500 : *all_dead = first_call;
1741 :
1742 44761118 : blkno = ItemPointerGetBlockNumber(tid);
1743 44761118 : offnum = ItemPointerGetOffsetNumber(tid);
1744 44761118 : at_chain_start = first_call;
1745 44761118 : skip = !first_call;
1746 :
1747 : /* XXX: we should assert that a snapshot is pushed or registered */
1748 : Assert(TransactionIdIsValid(RecentXmin));
1749 : Assert(BufferGetBlockNumber(buffer) == blkno);
1750 :
1751 : /* Scan through possible multiple members of HOT-chain */
1752 : for (;;)
1753 2860472 : {
1754 : ItemId lp;
1755 :
1756 : /* check for bogus TID */
1757 47621590 : if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1758 : break;
1759 :
1760 47621590 : lp = PageGetItemId(page, offnum);
1761 :
1762 : /* check for unused, dead, or redirected items */
1763 47621590 : if (!ItemIdIsNormal(lp))
1764 : {
1765 : /* We should only see a redirect at start of chain */
1766 1753776 : if (ItemIdIsRedirected(lp) && at_chain_start)
1767 : {
1768 : /* Follow the redirect */
1769 999086 : offnum = ItemIdGetRedirect(lp);
1770 999086 : at_chain_start = false;
1771 999086 : continue;
1772 : }
1773 : /* else must be end of chain */
1774 754690 : break;
1775 : }
1776 :
1777 : /*
1778 : * Update heapTuple to point to the element of the HOT chain we're
1779 : * currently investigating. Having t_self set correctly is important
1780 : * because the SSI checks and the *Satisfies routine for historical
1781 : * MVCC snapshots need the correct tid to decide about the visibility.
1782 : */
1783 45867814 : heapTuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1784 45867814 : heapTuple->t_len = ItemIdGetLength(lp);
1785 45867814 : heapTuple->t_tableOid = RelationGetRelid(relation);
1786 45867814 : ItemPointerSet(&heapTuple->t_self, blkno, offnum);
1787 :
1788 : /*
1789 : * Shouldn't see a HEAP_ONLY tuple at chain start.
1790 : */
1791 45867814 : if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
1792 0 : break;
1793 :
1794 : /*
1795 : * The xmin should match the previous xmax value, else chain is
1796 : * broken.
1797 : */
1798 47729200 : if (TransactionIdIsValid(prev_xmax) &&
1799 1861386 : !TransactionIdEquals(prev_xmax,
1800 : HeapTupleHeaderGetXmin(heapTuple->t_data)))
1801 0 : break;
1802 :
1803 : /*
1804 : * When first_call is true (and thus, skip is initially false) we'll
1805 : * return the first tuple we find. But on later passes, heapTuple
1806 : * will initially be pointing to the tuple we returned last time.
1807 : * Returning it again would be incorrect (and would loop forever), so
1808 : * we skip it and return the next match we find.
1809 : */
1810 45867814 : if (!skip)
1811 : {
1812 : /* If it's visible per the snapshot, we must return it */
1813 45697662 : valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
1814 45697662 : HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
1815 : buffer, snapshot);
1816 :
1817 45697652 : if (valid)
1818 : {
1819 31585446 : ItemPointerSetOffsetNumber(tid, offnum);
1820 31585446 : PredicateLockTID(relation, &heapTuple->t_self, snapshot,
1821 31585446 : HeapTupleHeaderGetXmin(heapTuple->t_data));
1822 31585446 : if (all_dead)
1823 25645630 : *all_dead = false;
1824 31585446 : return true;
1825 : }
1826 : }
1827 14282358 : skip = false;
1828 :
1829 : /*
1830 : * If we can't see it, maybe no one else can either. At caller
1831 : * request, check whether all chain members are dead to all
1832 : * transactions.
1833 : *
1834 : * Note: if you change the criterion here for what is "dead", fix the
1835 : * planner's get_actual_variable_range() function to match.
1836 : */
1837 14282358 : if (all_dead && *all_dead)
1838 : {
1839 12901154 : if (!vistest)
1840 12622868 : vistest = GlobalVisTestFor(relation);
1841 :
1842 12901154 : if (!HeapTupleIsSurelyDead(heapTuple, vistest))
1843 12159246 : *all_dead = false;
1844 : }
1845 :
1846 : /*
1847 : * Check to see if HOT chain continues past this tuple; if so fetch
1848 : * the next offnum and loop around.
1849 : */
1850 14282358 : if (HeapTupleIsHotUpdated(heapTuple))
1851 : {
1852 : Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
1853 : blkno);
1854 1861386 : offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
1855 1861386 : at_chain_start = false;
1856 1861386 : prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1857 : }
1858 : else
1859 12420972 : break; /* end of chain */
1860 : }
1861 :
1862 13175662 : return false;
1863 : }
1864 :
1865 : /*
1866 : * heap_get_latest_tid - get the latest tid of a specified tuple
1867 : *
1868 : * Actually, this gets the latest version that is visible according to the
1869 : * scan's snapshot. Create a scan using SnapshotDirty to get the very latest,
1870 : * possibly uncommitted version.
1871 : *
1872 : * *tid is both an input and an output parameter: it is updated to
1873 : * show the latest version of the row. Note that it will not be changed
1874 : * if no version of the row passes the snapshot test.
1875 : */
1876 : void
1877 300 : heap_get_latest_tid(TableScanDesc sscan,
1878 : ItemPointer tid)
1879 : {
1880 300 : Relation relation = sscan->rs_rd;
1881 300 : Snapshot snapshot = sscan->rs_snapshot;
1882 : ItemPointerData ctid;
1883 : TransactionId priorXmax;
1884 :
1885 : /*
1886 : * table_tuple_get_latest_tid() verified that the passed in tid is valid.
1887 : * Assume that t_ctid links are valid however - there shouldn't be invalid
1888 : * ones in the table.
1889 : */
1890 : Assert(ItemPointerIsValid(tid));
1891 :
1892 : /*
1893 : * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1894 : * need to examine, and *tid is the TID we will return if ctid turns out
1895 : * to be bogus.
1896 : *
1897 : * Note that we will loop until we reach the end of the t_ctid chain.
1898 : * Depending on the snapshot passed, there might be at most one visible
1899 : * version of the row, but we don't try to optimize for that.
1900 : */
1901 300 : ctid = *tid;
1902 300 : priorXmax = InvalidTransactionId; /* cannot check first XMIN */
1903 : for (;;)
1904 90 : {
1905 : Buffer buffer;
1906 : Page page;
1907 : OffsetNumber offnum;
1908 : ItemId lp;
1909 : HeapTupleData tp;
1910 : bool valid;
1911 :
1912 : /*
1913 : * Read, pin, and lock the page.
1914 : */
1915 390 : buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
1916 390 : LockBuffer(buffer, BUFFER_LOCK_SHARE);
1917 390 : page = BufferGetPage(buffer);
1918 :
1919 : /*
1920 : * Check for bogus item number. This is not treated as an error
1921 : * condition because it can happen while following a t_ctid link. We
1922 : * just assume that the prior tid is OK and return it unchanged.
1923 : */
1924 390 : offnum = ItemPointerGetOffsetNumber(&ctid);
1925 390 : if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1926 : {
1927 0 : UnlockReleaseBuffer(buffer);
1928 0 : break;
1929 : }
1930 390 : lp = PageGetItemId(page, offnum);
1931 390 : if (!ItemIdIsNormal(lp))
1932 : {
1933 0 : UnlockReleaseBuffer(buffer);
1934 0 : break;
1935 : }
1936 :
1937 : /* OK to access the tuple */
1938 390 : tp.t_self = ctid;
1939 390 : tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
1940 390 : tp.t_len = ItemIdGetLength(lp);
1941 390 : tp.t_tableOid = RelationGetRelid(relation);
1942 :
1943 : /*
1944 : * After following a t_ctid link, we might arrive at an unrelated
1945 : * tuple. Check for XMIN match.
1946 : */
1947 480 : if (TransactionIdIsValid(priorXmax) &&
1948 90 : !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
1949 : {
1950 0 : UnlockReleaseBuffer(buffer);
1951 0 : break;
1952 : }
1953 :
1954 : /*
1955 : * Check tuple visibility; if visible, set it as the new result
1956 : * candidate.
1957 : */
1958 390 : valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
1959 390 : HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
1960 390 : if (valid)
1961 276 : *tid = ctid;
1962 :
1963 : /*
1964 : * If there's a valid t_ctid link, follow it, else we're done.
1965 : */
1966 552 : if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
1967 276 : HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
1968 228 : HeapTupleHeaderIndicatesMovedPartitions(tp.t_data) ||
1969 114 : ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
1970 : {
1971 300 : UnlockReleaseBuffer(buffer);
1972 300 : break;
1973 : }
1974 :
1975 90 : ctid = tp.t_data->t_ctid;
1976 90 : priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
1977 90 : UnlockReleaseBuffer(buffer);
1978 : } /* end of loop */
1979 300 : }
1980 :
1981 :
1982 : /*
1983 : * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
1984 : *
1985 : * This is called after we have waited for the XMAX transaction to terminate.
1986 : * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
1987 : * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
1988 : * hint bit if possible --- but beware that that may not yet be possible,
1989 : * if the transaction committed asynchronously.
1990 : *
1991 : * Note that if the transaction was a locker only, we set HEAP_XMAX_INVALID
1992 : * even if it commits.
1993 : *
1994 : * Hence callers should look only at XMAX_INVALID.
1995 : *
1996 : * Note this is not allowed for tuples whose xmax is a multixact.
1997 : */
1998 : static void
1999 426 : UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
2000 : {
2001 : Assert(TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple), xid));
2002 : Assert(!(tuple->t_infomask & HEAP_XMAX_IS_MULTI));
2003 :
2004 426 : if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
2005 : {
2006 762 : if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
2007 336 : TransactionIdDidCommit(xid))
2008 282 : HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
2009 : xid);
2010 : else
2011 144 : HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
2012 : InvalidTransactionId);
2013 : }
2014 426 : }
2015 :
2016 :
2017 : /*
2018 : * GetBulkInsertState - prepare status object for a bulk insert
2019 : */
2020 : BulkInsertState
2021 4662 : GetBulkInsertState(void)
2022 : {
2023 : BulkInsertState bistate;
2024 :
2025 4662 : bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
2026 4662 : bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
2027 4662 : bistate->current_buf = InvalidBuffer;
2028 4662 : bistate->next_free = InvalidBlockNumber;
2029 4662 : bistate->last_free = InvalidBlockNumber;
2030 4662 : bistate->already_extended_by = 0;
2031 4662 : return bistate;
2032 : }
2033 :
2034 : /*
2035 : * FreeBulkInsertState - clean up after finishing a bulk insert
2036 : */
2037 : void
2038 4374 : FreeBulkInsertState(BulkInsertState bistate)
2039 : {
2040 4374 : if (bistate->current_buf != InvalidBuffer)
2041 3508 : ReleaseBuffer(bistate->current_buf);
2042 4374 : FreeAccessStrategy(bistate->strategy);
2043 4374 : pfree(bistate);
2044 4374 : }
2045 :
2046 : /*
2047 : * ReleaseBulkInsertStatePin - release a buffer currently held in bistate
2048 : */
2049 : void
2050 161516 : ReleaseBulkInsertStatePin(BulkInsertState bistate)
2051 : {
2052 161516 : if (bistate->current_buf != InvalidBuffer)
2053 60042 : ReleaseBuffer(bistate->current_buf);
2054 161516 : bistate->current_buf = InvalidBuffer;
2055 :
2056 : /*
2057 : * Despite the name, we also reset bulk relation extension state.
2058 : * Otherwise we can end up erroring out due to looking for free space in
2059 : * ->next_free of one partition, even though ->next_free was set when
2060 : * extending another partition. It could obviously also be bad for
2061 : * efficiency to look at existing blocks at offsets from another
2062 : * partition, even if we don't error out.
2063 : */
2064 161516 : bistate->next_free = InvalidBlockNumber;
2065 161516 : bistate->last_free = InvalidBlockNumber;
2066 161516 : }
2067 :
2068 :
2069 : /*
2070 : * heap_insert - insert tuple into a heap
2071 : *
2072 : * The new tuple is stamped with current transaction ID and the specified
2073 : * command ID.
2074 : *
2075 : * See table_tuple_insert for comments about most of the input flags, except
2076 : * that this routine directly takes a tuple rather than a slot.
2077 : *
2078 : * There's corresponding HEAP_INSERT_ options to all the TABLE_INSERT_
2079 : * options, and there additionally is HEAP_INSERT_SPECULATIVE which is used to
2080 : * implement table_tuple_insert_speculative().
2081 : *
2082 : * On return the header fields of *tup are updated to match the stored tuple;
2083 : * in particular tup->t_self receives the actual TID where the tuple was
2084 : * stored. But note that any toasting of fields within the tuple data is NOT
2085 : * reflected into *tup.
2086 : */
2087 : void
2088 16686392 : heap_insert(Relation relation, HeapTuple tup, CommandId cid,
2089 : int options, BulkInsertState bistate)
2090 : {
2091 16686392 : TransactionId xid = GetCurrentTransactionId();
2092 : HeapTuple heaptup;
2093 : Buffer buffer;
2094 16686378 : Buffer vmbuffer = InvalidBuffer;
2095 16686378 : bool all_visible_cleared = false;
2096 :
2097 : /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
2098 : Assert(HeapTupleHeaderGetNatts(tup->t_data) <=
2099 : RelationGetNumberOfAttributes(relation));
2100 :
2101 16686378 : AssertHasSnapshotForToast(relation);
2102 :
2103 : /*
2104 : * Fill in tuple header fields and toast the tuple if necessary.
2105 : *
2106 : * Note: below this point, heaptup is the data we actually intend to store
2107 : * into the relation; tup is the caller's original untoasted data.
2108 : */
2109 16686378 : heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2110 :
2111 : /*
2112 : * Find buffer to insert this tuple into. If the page is all visible,
2113 : * this will also pin the requisite visibility map page.
2114 : */
2115 16686378 : buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2116 : InvalidBuffer, options, bistate,
2117 : &vmbuffer, NULL,
2118 : 0);
2119 :
2120 : /*
2121 : * We're about to do the actual insert -- but check for conflict first, to
2122 : * avoid possibly having to roll back work we've just done.
2123 : *
2124 : * This is safe without a recheck as long as there is no possibility of
2125 : * another process scanning the page between this check and the insert
2126 : * being visible to the scan (i.e., an exclusive buffer content lock is
2127 : * continuously held from this point until the tuple insert is visible).
2128 : *
2129 : * For a heap insert, we only need to check for table-level SSI locks. Our
2130 : * new tuple can't possibly conflict with existing tuple locks, and heap
2131 : * page locks are only consolidated versions of tuple locks; they do not
2132 : * lock "gaps" as index page locks do. So we don't need to specify a
2133 : * buffer when making the call, which makes for a faster check.
2134 : */
2135 16686378 : CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
2136 :
2137 : /* NO EREPORT(ERROR) from here till changes are logged */
2138 16686354 : START_CRIT_SECTION();
2139 :
2140 16686354 : RelationPutHeapTuple(relation, buffer, heaptup,
2141 16686354 : (options & HEAP_INSERT_SPECULATIVE) != 0);
2142 :
2143 16686354 : if (PageIsAllVisible(BufferGetPage(buffer)))
2144 : {
2145 14936 : all_visible_cleared = true;
2146 14936 : PageClearAllVisible(BufferGetPage(buffer));
2147 14936 : visibilitymap_clear(relation,
2148 14936 : ItemPointerGetBlockNumber(&(heaptup->t_self)),
2149 : vmbuffer, VISIBILITYMAP_VALID_BITS);
2150 : }
2151 :
2152 : /*
2153 : * XXX Should we set PageSetPrunable on this page ?
2154 : *
2155 : * The inserting transaction may eventually abort thus making this tuple
2156 : * DEAD and hence available for pruning. Though we don't want to optimize
2157 : * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
2158 : * aborted tuple will never be pruned until next vacuum is triggered.
2159 : *
2160 : * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2161 : */
2162 :
2163 16686354 : MarkBufferDirty(buffer);
2164 :
2165 : /* XLOG stuff */
2166 16686354 : if (RelationNeedsWAL(relation))
2167 : {
2168 : xl_heap_insert xlrec;
2169 : xl_heap_header xlhdr;
2170 : XLogRecPtr recptr;
2171 14174324 : Page page = BufferGetPage(buffer);
2172 14174324 : uint8 info = XLOG_HEAP_INSERT;
2173 14174324 : int bufflags = 0;
2174 :
2175 : /*
2176 : * If this is a catalog, we need to transmit combo CIDs to properly
2177 : * decode, so log that as well.
2178 : */
2179 14174324 : if (RelationIsAccessibleInLogicalDecoding(relation))
2180 6796 : log_heap_new_cid(relation, heaptup);
2181 :
2182 : /*
2183 : * If this is the single and first tuple on page, we can reinit the
2184 : * page instead of restoring the whole thing. Set flag, and hide
2185 : * buffer references from XLogInsert.
2186 : */
2187 14356852 : if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
2188 182528 : PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
2189 : {
2190 180710 : info |= XLOG_HEAP_INIT_PAGE;
2191 180710 : bufflags |= REGBUF_WILL_INIT;
2192 : }
2193 :
2194 14174324 : xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
2195 14174324 : xlrec.flags = 0;
2196 14174324 : if (all_visible_cleared)
2197 14930 : xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
2198 14174324 : if (options & HEAP_INSERT_SPECULATIVE)
2199 4122 : xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
2200 : Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
2201 :
2202 : /*
2203 : * For logical decoding, we need the tuple even if we're doing a full
2204 : * page write, so make sure it's included even if we take a full-page
2205 : * image. (XXX We could alternatively store a pointer into the FPW).
2206 : */
2207 14174324 : if (RelationIsLogicallyLogged(relation) &&
2208 499810 : !(options & HEAP_INSERT_NO_LOGICAL))
2209 : {
2210 499756 : xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
2211 499756 : bufflags |= REGBUF_KEEP_DATA;
2212 :
2213 499756 : if (IsToastRelation(relation))
2214 3572 : xlrec.flags |= XLH_INSERT_ON_TOAST_RELATION;
2215 : }
2216 :
2217 14174324 : XLogBeginInsert();
2218 14174324 : XLogRegisterData(&xlrec, SizeOfHeapInsert);
2219 :
2220 14174324 : xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2221 14174324 : xlhdr.t_infomask = heaptup->t_data->t_infomask;
2222 14174324 : xlhdr.t_hoff = heaptup->t_data->t_hoff;
2223 :
2224 : /*
2225 : * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2226 : * write the whole page to the xlog, we don't need to store
2227 : * xl_heap_header in the xlog.
2228 : */
2229 14174324 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2230 14174324 : XLogRegisterBufData(0, &xlhdr, SizeOfHeapHeader);
2231 : /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2232 14174324 : XLogRegisterBufData(0,
2233 14174324 : (char *) heaptup->t_data + SizeofHeapTupleHeader,
2234 14174324 : heaptup->t_len - SizeofHeapTupleHeader);
2235 :
2236 : /* filtering by origin on a row level is much more efficient */
2237 14174324 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2238 :
2239 14174324 : recptr = XLogInsert(RM_HEAP_ID, info);
2240 :
2241 14174324 : PageSetLSN(page, recptr);
2242 : }
2243 :
2244 16686354 : END_CRIT_SECTION();
2245 :
2246 16686354 : UnlockReleaseBuffer(buffer);
2247 16686354 : if (vmbuffer != InvalidBuffer)
2248 15496 : ReleaseBuffer(vmbuffer);
2249 :
2250 : /*
2251 : * If tuple is cachable, mark it for invalidation from the caches in case
2252 : * we abort. Note it is OK to do this after releasing the buffer, because
2253 : * the heaptup data structure is all in local memory, not in the shared
2254 : * buffer.
2255 : */
2256 16686354 : CacheInvalidateHeapTuple(relation, heaptup, NULL);
2257 :
2258 : /* Note: speculative insertions are counted too, even if aborted later */
2259 16686354 : pgstat_count_heap_insert(relation, 1);
2260 :
2261 : /*
2262 : * If heaptup is a private copy, release it. Don't forget to copy t_self
2263 : * back to the caller's image, too.
2264 : */
2265 16686354 : if (heaptup != tup)
2266 : {
2267 36436 : tup->t_self = heaptup->t_self;
2268 36436 : heap_freetuple(heaptup);
2269 : }
2270 16686354 : }
2271 :
2272 : /*
2273 : * Subroutine for heap_insert(). Prepares a tuple for insertion. This sets the
2274 : * tuple header fields and toasts the tuple if necessary. Returns a toasted
2275 : * version of the tuple if it was toasted, or the original tuple if not. Note
2276 : * that in any case, the header fields are also set in the original tuple.
2277 : */
2278 : static HeapTuple
2279 19632228 : heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
2280 : CommandId cid, int options)
2281 : {
2282 : /*
2283 : * To allow parallel inserts, we need to ensure that they are safe to be
2284 : * performed in workers. We have the infrastructure to allow parallel
2285 : * inserts in general except for the cases where inserts generate a new
2286 : * CommandId (eg. inserts into a table having a foreign key column).
2287 : */
2288 19632228 : if (IsParallelWorker())
2289 0 : ereport(ERROR,
2290 : (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2291 : errmsg("cannot insert tuples in a parallel worker")));
2292 :
2293 19632228 : tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
2294 19632228 : tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
2295 19632228 : tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
2296 19632228 : HeapTupleHeaderSetXmin(tup->t_data, xid);
2297 19632228 : if (options & HEAP_INSERT_FROZEN)
2298 204192 : HeapTupleHeaderSetXminFrozen(tup->t_data);
2299 :
2300 19632228 : HeapTupleHeaderSetCmin(tup->t_data, cid);
2301 19632228 : HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
2302 19632228 : tup->t_tableOid = RelationGetRelid(relation);
2303 :
2304 : /*
2305 : * If the new tuple is too big for storage or contains already toasted
2306 : * out-of-line attributes from some other relation, invoke the toaster.
2307 : */
2308 19632228 : if (relation->rd_rel->relkind != RELKIND_RELATION &&
2309 62050 : relation->rd_rel->relkind != RELKIND_MATVIEW)
2310 : {
2311 : /* toast table entries should never be recursively toasted */
2312 : Assert(!HeapTupleHasExternal(tup));
2313 61954 : return tup;
2314 : }
2315 19570274 : else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
2316 36524 : return heap_toast_insert_or_update(relation, tup, NULL, options);
2317 : else
2318 19533750 : return tup;
2319 : }
2320 :
2321 : /*
2322 : * Helper for heap_multi_insert() that computes the number of entire pages
2323 : * that inserting the remaining heaptuples requires. Used to determine how
2324 : * much the relation needs to be extended by.
2325 : */
2326 : static int
2327 728856 : heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace)
2328 : {
2329 728856 : size_t page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
2330 728856 : int npages = 1;
2331 :
2332 4847132 : for (int i = done; i < ntuples; i++)
2333 : {
2334 4118276 : size_t tup_sz = sizeof(ItemIdData) + MAXALIGN(heaptuples[i]->t_len);
2335 :
2336 4118276 : if (page_avail < tup_sz)
2337 : {
2338 30950 : npages++;
2339 30950 : page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
2340 : }
2341 4118276 : page_avail -= tup_sz;
2342 : }
2343 :
2344 728856 : return npages;
2345 : }
2346 :
2347 : /*
2348 : * heap_multi_insert - insert multiple tuples into a heap
2349 : *
2350 : * This is like heap_insert(), but inserts multiple tuples in one operation.
2351 : * That's faster than calling heap_insert() in a loop, because when multiple
2352 : * tuples can be inserted on a single page, we can write just a single WAL
2353 : * record covering all of them, and only need to lock/unlock the page once.
2354 : *
2355 : * Note: this leaks memory into the current memory context. You can create a
2356 : * temporary context before calling this, if that's a problem.
2357 : */
2358 : void
2359 715958 : heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
2360 : CommandId cid, int options, BulkInsertState bistate)
2361 : {
2362 715958 : TransactionId xid = GetCurrentTransactionId();
2363 : HeapTuple *heaptuples;
2364 : int i;
2365 : int ndone;
2366 : PGAlignedBlock scratch;
2367 : Page page;
2368 715958 : Buffer vmbuffer = InvalidBuffer;
2369 : bool needwal;
2370 : Size saveFreeSpace;
2371 715958 : bool need_tuple_data = RelationIsLogicallyLogged(relation);
2372 715958 : bool need_cids = RelationIsAccessibleInLogicalDecoding(relation);
2373 715958 : bool starting_with_empty_page = false;
2374 715958 : int npages = 0;
2375 715958 : int npages_used = 0;
2376 :
2377 : /* currently not needed (thus unsupported) for heap_multi_insert() */
2378 : Assert(!(options & HEAP_INSERT_NO_LOGICAL));
2379 :
2380 715958 : AssertHasSnapshotForToast(relation);
2381 :
2382 715958 : needwal = RelationNeedsWAL(relation);
2383 715958 : saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
2384 : HEAP_DEFAULT_FILLFACTOR);
2385 :
2386 : /* Toast and set header data in all the slots */
2387 715958 : heaptuples = palloc(ntuples * sizeof(HeapTuple));
2388 3661808 : for (i = 0; i < ntuples; i++)
2389 : {
2390 : HeapTuple tuple;
2391 :
2392 2945850 : tuple = ExecFetchSlotHeapTuple(slots[i], true, NULL);
2393 2945850 : slots[i]->tts_tableOid = RelationGetRelid(relation);
2394 2945850 : tuple->t_tableOid = slots[i]->tts_tableOid;
2395 2945850 : heaptuples[i] = heap_prepare_insert(relation, tuple, xid, cid,
2396 : options);
2397 : }
2398 :
2399 : /*
2400 : * We're about to do the actual inserts -- but check for conflict first,
2401 : * to minimize the possibility of having to roll back work we've just
2402 : * done.
2403 : *
2404 : * A check here does not definitively prevent a serialization anomaly;
2405 : * that check MUST be done at least past the point of acquiring an
2406 : * exclusive buffer content lock on every buffer that will be affected,
2407 : * and MAY be done after all inserts are reflected in the buffers and
2408 : * those locks are released; otherwise there is a race condition. Since
2409 : * multiple buffers can be locked and unlocked in the loop below, and it
2410 : * would not be feasible to identify and lock all of those buffers before
2411 : * the loop, we must do a final check at the end.
2412 : *
2413 : * The check here could be omitted with no loss of correctness; it is
2414 : * present strictly as an optimization.
2415 : *
2416 : * For heap inserts, we only need to check for table-level SSI locks. Our
2417 : * new tuples can't possibly conflict with existing tuple locks, and heap
2418 : * page locks are only consolidated versions of tuple locks; they do not
2419 : * lock "gaps" as index page locks do. So we don't need to specify a
2420 : * buffer when making the call, which makes for a faster check.
2421 : */
2422 715958 : CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
2423 :
2424 715958 : ndone = 0;
2425 1461114 : while (ndone < ntuples)
2426 : {
2427 : Buffer buffer;
2428 745156 : bool all_visible_cleared = false;
2429 745156 : bool all_frozen_set = false;
2430 : int nthispage;
2431 :
2432 745156 : CHECK_FOR_INTERRUPTS();
2433 :
2434 : /*
2435 : * Compute number of pages needed to fit the to-be-inserted tuples in
2436 : * the worst case. This will be used to determine how much to extend
2437 : * the relation by in RelationGetBufferForTuple(), if needed. If we
2438 : * filled a prior page from scratch, we can just update our last
2439 : * computation, but if we started with a partially filled page,
2440 : * recompute from scratch, the number of potentially required pages
2441 : * can vary due to tuples needing to fit onto the page, page headers
2442 : * etc.
2443 : */
2444 745156 : if (ndone == 0 || !starting_with_empty_page)
2445 : {
2446 728856 : npages = heap_multi_insert_pages(heaptuples, ndone, ntuples,
2447 : saveFreeSpace);
2448 728856 : npages_used = 0;
2449 : }
2450 : else
2451 16300 : npages_used++;
2452 :
2453 : /*
2454 : * Find buffer where at least the next tuple will fit. If the page is
2455 : * all-visible, this will also pin the requisite visibility map page.
2456 : *
2457 : * Also pin visibility map page if COPY FREEZE inserts tuples into an
2458 : * empty page. See all_frozen_set below.
2459 : */
2460 745156 : buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
2461 : InvalidBuffer, options, bistate,
2462 : &vmbuffer, NULL,
2463 : npages - npages_used);
2464 745156 : page = BufferGetPage(buffer);
2465 :
2466 745156 : starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0;
2467 :
2468 745156 : if (starting_with_empty_page && (options & HEAP_INSERT_FROZEN))
2469 : {
2470 3322 : all_frozen_set = true;
2471 : /* Lock the vmbuffer before entering the critical section */
2472 3322 : LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE);
2473 : }
2474 :
2475 : /* NO EREPORT(ERROR) from here till changes are logged */
2476 745156 : START_CRIT_SECTION();
2477 :
2478 : /*
2479 : * RelationGetBufferForTuple has ensured that the first tuple fits.
2480 : * Put that on the page, and then as many other tuples as fit.
2481 : */
2482 745156 : RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
2483 :
2484 : /*
2485 : * For logical decoding we need combo CIDs to properly decode the
2486 : * catalog.
2487 : */
2488 745156 : if (needwal && need_cids)
2489 10012 : log_heap_new_cid(relation, heaptuples[ndone]);
2490 :
2491 2945850 : for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
2492 : {
2493 2229892 : HeapTuple heaptup = heaptuples[ndone + nthispage];
2494 :
2495 2229892 : if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace)
2496 29198 : break;
2497 :
2498 2200694 : RelationPutHeapTuple(relation, buffer, heaptup, false);
2499 :
2500 : /*
2501 : * For logical decoding we need combo CIDs to properly decode the
2502 : * catalog.
2503 : */
2504 2200694 : if (needwal && need_cids)
2505 9346 : log_heap_new_cid(relation, heaptup);
2506 : }
2507 :
2508 : /*
2509 : * If the page is all visible, need to clear that, unless we're only
2510 : * going to add further frozen rows to it.
2511 : *
2512 : * If we're only adding already frozen rows to a previously empty
2513 : * page, mark it as all-frozen and update the visibility map. We're
2514 : * already holding a pin on the vmbuffer.
2515 : */
2516 745156 : if (PageIsAllVisible(page) && !(options & HEAP_INSERT_FROZEN))
2517 : {
2518 5960 : all_visible_cleared = true;
2519 5960 : PageClearAllVisible(page);
2520 5960 : visibilitymap_clear(relation,
2521 : BufferGetBlockNumber(buffer),
2522 : vmbuffer, VISIBILITYMAP_VALID_BITS);
2523 : }
2524 739196 : else if (all_frozen_set)
2525 : {
2526 3322 : PageSetAllVisible(page);
2527 3322 : visibilitymap_set_vmbits(BufferGetBlockNumber(buffer),
2528 : vmbuffer,
2529 : VISIBILITYMAP_ALL_VISIBLE |
2530 : VISIBILITYMAP_ALL_FROZEN,
2531 : relation->rd_locator);
2532 : }
2533 :
2534 : /*
2535 : * XXX Should we set PageSetPrunable on this page ? See heap_insert()
2536 : */
2537 :
2538 745156 : MarkBufferDirty(buffer);
2539 :
2540 : /* XLOG stuff */
2541 745156 : if (needwal)
2542 : {
2543 : XLogRecPtr recptr;
2544 : xl_heap_multi_insert *xlrec;
2545 737500 : uint8 info = XLOG_HEAP2_MULTI_INSERT;
2546 : char *tupledata;
2547 : int totaldatalen;
2548 737500 : char *scratchptr = scratch.data;
2549 : bool init;
2550 737500 : int bufflags = 0;
2551 :
2552 : /*
2553 : * If the page was previously empty, we can reinit the page
2554 : * instead of restoring the whole thing.
2555 : */
2556 737500 : init = starting_with_empty_page;
2557 :
2558 : /* allocate xl_heap_multi_insert struct from the scratch area */
2559 737500 : xlrec = (xl_heap_multi_insert *) scratchptr;
2560 737500 : scratchptr += SizeOfHeapMultiInsert;
2561 :
2562 : /*
2563 : * Allocate offsets array. Unless we're reinitializing the page,
2564 : * in that case the tuples are stored in order starting at
2565 : * FirstOffsetNumber and we don't need to store the offsets
2566 : * explicitly.
2567 : */
2568 737500 : if (!init)
2569 711392 : scratchptr += nthispage * sizeof(OffsetNumber);
2570 :
2571 : /* the rest of the scratch space is used for tuple data */
2572 737500 : tupledata = scratchptr;
2573 :
2574 : /* check that the mutually exclusive flags are not both set */
2575 : Assert(!(all_visible_cleared && all_frozen_set));
2576 :
2577 737500 : xlrec->flags = 0;
2578 737500 : if (all_visible_cleared)
2579 5960 : xlrec->flags = XLH_INSERT_ALL_VISIBLE_CLEARED;
2580 :
2581 : /*
2582 : * We don't have to worry about including a conflict xid in the
2583 : * WAL record, as HEAP_INSERT_FROZEN intentionally violates
2584 : * visibility rules.
2585 : */
2586 737500 : if (all_frozen_set)
2587 26 : xlrec->flags = XLH_INSERT_ALL_FROZEN_SET;
2588 :
2589 737500 : xlrec->ntuples = nthispage;
2590 :
2591 : /*
2592 : * Write out an xl_multi_insert_tuple and the tuple data itself
2593 : * for each tuple.
2594 : */
2595 3272520 : for (i = 0; i < nthispage; i++)
2596 : {
2597 2535020 : HeapTuple heaptup = heaptuples[ndone + i];
2598 : xl_multi_insert_tuple *tuphdr;
2599 : int datalen;
2600 :
2601 2535020 : if (!init)
2602 1487910 : xlrec->offsets[i] = ItemPointerGetOffsetNumber(&heaptup->t_self);
2603 : /* xl_multi_insert_tuple needs two-byte alignment. */
2604 2535020 : tuphdr = (xl_multi_insert_tuple *) SHORTALIGN(scratchptr);
2605 2535020 : scratchptr = ((char *) tuphdr) + SizeOfMultiInsertTuple;
2606 :
2607 2535020 : tuphdr->t_infomask2 = heaptup->t_data->t_infomask2;
2608 2535020 : tuphdr->t_infomask = heaptup->t_data->t_infomask;
2609 2535020 : tuphdr->t_hoff = heaptup->t_data->t_hoff;
2610 :
2611 : /* write bitmap [+ padding] [+ oid] + data */
2612 2535020 : datalen = heaptup->t_len - SizeofHeapTupleHeader;
2613 2535020 : memcpy(scratchptr,
2614 2535020 : (char *) heaptup->t_data + SizeofHeapTupleHeader,
2615 : datalen);
2616 2535020 : tuphdr->datalen = datalen;
2617 2535020 : scratchptr += datalen;
2618 : }
2619 737500 : totaldatalen = scratchptr - tupledata;
2620 : Assert((scratchptr - scratch.data) < BLCKSZ);
2621 :
2622 737500 : if (need_tuple_data)
2623 144 : xlrec->flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
2624 :
2625 : /*
2626 : * Signal that this is the last xl_heap_multi_insert record
2627 : * emitted by this call to heap_multi_insert(). Needed for logical
2628 : * decoding so it knows when to cleanup temporary data.
2629 : */
2630 737500 : if (ndone + nthispage == ntuples)
2631 715120 : xlrec->flags |= XLH_INSERT_LAST_IN_MULTI;
2632 :
2633 737500 : if (init)
2634 : {
2635 26108 : info |= XLOG_HEAP_INIT_PAGE;
2636 26108 : bufflags |= REGBUF_WILL_INIT;
2637 : }
2638 :
2639 : /*
2640 : * If we're doing logical decoding, include the new tuple data
2641 : * even if we take a full-page image of the page.
2642 : */
2643 737500 : if (need_tuple_data)
2644 144 : bufflags |= REGBUF_KEEP_DATA;
2645 :
2646 737500 : XLogBeginInsert();
2647 737500 : XLogRegisterData(xlrec, tupledata - scratch.data);
2648 737500 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2649 737500 : if (all_frozen_set)
2650 26 : XLogRegisterBuffer(1, vmbuffer, 0);
2651 :
2652 737500 : XLogRegisterBufData(0, tupledata, totaldatalen);
2653 :
2654 : /* filtering by origin on a row level is much more efficient */
2655 737500 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2656 :
2657 737500 : recptr = XLogInsert(RM_HEAP2_ID, info);
2658 :
2659 737500 : PageSetLSN(page, recptr);
2660 737500 : if (all_frozen_set)
2661 : {
2662 : Assert(BufferIsDirty(vmbuffer));
2663 26 : PageSetLSN(BufferGetPage(vmbuffer), recptr);
2664 : }
2665 : }
2666 :
2667 745156 : END_CRIT_SECTION();
2668 :
2669 745156 : if (all_frozen_set)
2670 3322 : LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
2671 :
2672 745156 : UnlockReleaseBuffer(buffer);
2673 745156 : ndone += nthispage;
2674 :
2675 : /*
2676 : * NB: Only release vmbuffer after inserting all tuples - it's fairly
2677 : * likely that we'll insert into subsequent heap pages that are likely
2678 : * to use the same vm page.
2679 : */
2680 : }
2681 :
2682 : /* We're done with inserting all tuples, so release the last vmbuffer. */
2683 715958 : if (vmbuffer != InvalidBuffer)
2684 6128 : ReleaseBuffer(vmbuffer);
2685 :
2686 : /*
2687 : * We're done with the actual inserts. Check for conflicts again, to
2688 : * ensure that all rw-conflicts in to these inserts are detected. Without
2689 : * this final check, a sequential scan of the heap may have locked the
2690 : * table after the "before" check, missing one opportunity to detect the
2691 : * conflict, and then scanned the table before the new tuples were there,
2692 : * missing the other chance to detect the conflict.
2693 : *
2694 : * For heap inserts, we only need to check for table-level SSI locks. Our
2695 : * new tuples can't possibly conflict with existing tuple locks, and heap
2696 : * page locks are only consolidated versions of tuple locks; they do not
2697 : * lock "gaps" as index page locks do. So we don't need to specify a
2698 : * buffer when making the call.
2699 : */
2700 715958 : CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
2701 :
2702 : /*
2703 : * If tuples are cachable, mark them for invalidation from the caches in
2704 : * case we abort. Note it is OK to do this after releasing the buffer,
2705 : * because the heaptuples data structure is all in local memory, not in
2706 : * the shared buffer.
2707 : */
2708 715958 : if (IsCatalogRelation(relation))
2709 : {
2710 2456268 : for (i = 0; i < ntuples; i++)
2711 1742736 : CacheInvalidateHeapTuple(relation, heaptuples[i], NULL);
2712 : }
2713 :
2714 : /* copy t_self fields back to the caller's slots */
2715 3661808 : for (i = 0; i < ntuples; i++)
2716 2945850 : slots[i]->tts_tid = heaptuples[i]->t_self;
2717 :
2718 715958 : pgstat_count_heap_insert(relation, ntuples);
2719 715958 : }
2720 :
2721 : /*
2722 : * simple_heap_insert - insert a tuple
2723 : *
2724 : * Currently, this routine differs from heap_insert only in supplying
2725 : * a default command ID and not allowing access to the speedup options.
2726 : *
2727 : * This should be used rather than using heap_insert directly in most places
2728 : * where we are modifying system catalogs.
2729 : */
2730 : void
2731 1798966 : simple_heap_insert(Relation relation, HeapTuple tup)
2732 : {
2733 1798966 : heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
2734 1798966 : }
2735 :
2736 : /*
2737 : * Given infomask/infomask2, compute the bits that must be saved in the
2738 : * "infobits" field of xl_heap_delete, xl_heap_update, xl_heap_lock,
2739 : * xl_heap_lock_updated WAL records.
2740 : *
2741 : * See fix_infomask_from_infobits.
2742 : */
2743 : static uint8
2744 3895580 : compute_infobits(uint16 infomask, uint16 infomask2)
2745 : {
2746 : return
2747 3895580 : ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2748 3895580 : ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2749 3895580 : ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2750 : /* note we ignore HEAP_XMAX_SHR_LOCK here */
2751 7791160 : ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2752 : ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2753 3895580 : XLHL_KEYS_UPDATED : 0);
2754 : }
2755 :
2756 : /*
2757 : * Given two versions of the same t_infomask for a tuple, compare them and
2758 : * return whether the relevant status for a tuple Xmax has changed. This is
2759 : * used after a buffer lock has been released and reacquired: we want to ensure
2760 : * that the tuple state continues to be the same it was when we previously
2761 : * examined it.
2762 : *
2763 : * Note the Xmax field itself must be compared separately.
2764 : */
2765 : static inline bool
2766 10736 : xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
2767 : {
2768 10736 : const uint16 interesting =
2769 : HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
2770 :
2771 10736 : if ((new_infomask & interesting) != (old_infomask & interesting))
2772 28 : return true;
2773 :
2774 10708 : return false;
2775 : }
2776 :
2777 : /*
2778 : * heap_delete - delete a tuple
2779 : *
2780 : * See table_tuple_delete() for an explanation of the parameters, except that
2781 : * this routine directly takes a tuple rather than a slot.
2782 : *
2783 : * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
2784 : * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
2785 : * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
2786 : * generated by another transaction).
2787 : */
2788 : TM_Result
2789 2988604 : heap_delete(Relation relation, ItemPointer tid,
2790 : CommandId cid, Snapshot crosscheck, bool wait,
2791 : TM_FailureData *tmfd, bool changingPart)
2792 : {
2793 : TM_Result result;
2794 2988604 : TransactionId xid = GetCurrentTransactionId();
2795 : ItemId lp;
2796 : HeapTupleData tp;
2797 : Page page;
2798 : BlockNumber block;
2799 : Buffer buffer;
2800 2988604 : Buffer vmbuffer = InvalidBuffer;
2801 : TransactionId new_xmax;
2802 : uint16 new_infomask,
2803 : new_infomask2;
2804 2988604 : bool have_tuple_lock = false;
2805 : bool iscombo;
2806 2988604 : bool all_visible_cleared = false;
2807 2988604 : HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
2808 2988604 : bool old_key_copied = false;
2809 :
2810 : Assert(ItemPointerIsValid(tid));
2811 :
2812 2988604 : AssertHasSnapshotForToast(relation);
2813 :
2814 : /*
2815 : * Forbid this during a parallel operation, lest it allocate a combo CID.
2816 : * Other workers might need that combo CID for visibility checks, and we
2817 : * have no provision for broadcasting it to them.
2818 : */
2819 2988604 : if (IsInParallelMode())
2820 0 : ereport(ERROR,
2821 : (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2822 : errmsg("cannot delete tuples during a parallel operation")));
2823 :
2824 2988604 : block = ItemPointerGetBlockNumber(tid);
2825 2988604 : buffer = ReadBuffer(relation, block);
2826 2988604 : page = BufferGetPage(buffer);
2827 :
2828 : /*
2829 : * Before locking the buffer, pin the visibility map page if it appears to
2830 : * be necessary. Since we haven't got the lock yet, someone else might be
2831 : * in the middle of changing this, so we'll need to recheck after we have
2832 : * the lock.
2833 : */
2834 2988604 : if (PageIsAllVisible(page))
2835 414 : visibilitymap_pin(relation, block, &vmbuffer);
2836 :
2837 2988604 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2838 :
2839 2988604 : lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2840 : Assert(ItemIdIsNormal(lp));
2841 :
2842 2988604 : tp.t_tableOid = RelationGetRelid(relation);
2843 2988604 : tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2844 2988604 : tp.t_len = ItemIdGetLength(lp);
2845 2988604 : tp.t_self = *tid;
2846 :
2847 2 : l1:
2848 :
2849 : /*
2850 : * If we didn't pin the visibility map page and the page has become all
2851 : * visible while we were busy locking the buffer, we'll have to unlock and
2852 : * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
2853 : * unfortunate, but hopefully shouldn't happen often.
2854 : */
2855 2988606 : if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
2856 : {
2857 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2858 0 : visibilitymap_pin(relation, block, &vmbuffer);
2859 0 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2860 : }
2861 :
2862 2988606 : result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
2863 :
2864 2988606 : if (result == TM_Invisible)
2865 : {
2866 0 : UnlockReleaseBuffer(buffer);
2867 0 : ereport(ERROR,
2868 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2869 : errmsg("attempted to delete invisible tuple")));
2870 : }
2871 2988606 : else if (result == TM_BeingModified && wait)
2872 : {
2873 : TransactionId xwait;
2874 : uint16 infomask;
2875 :
2876 : /* must copy state data before unlocking buffer */
2877 81080 : xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
2878 81080 : infomask = tp.t_data->t_infomask;
2879 :
2880 : /*
2881 : * Sleep until concurrent transaction ends -- except when there's a
2882 : * single locker and it's our own transaction. Note we don't care
2883 : * which lock mode the locker has, because we need the strongest one.
2884 : *
2885 : * Before sleeping, we need to acquire tuple lock to establish our
2886 : * priority for the tuple (see heap_lock_tuple). LockTuple will
2887 : * release us when we are next-in-line for the tuple.
2888 : *
2889 : * If we are forced to "start over" below, we keep the tuple lock;
2890 : * this arranges that we stay at the head of the line while rechecking
2891 : * tuple state.
2892 : */
2893 81080 : if (infomask & HEAP_XMAX_IS_MULTI)
2894 : {
2895 16 : bool current_is_member = false;
2896 :
2897 16 : if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
2898 : LockTupleExclusive, ¤t_is_member))
2899 : {
2900 16 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2901 :
2902 : /*
2903 : * Acquire the lock, if necessary (but skip it when we're
2904 : * requesting a lock and already have one; avoids deadlock).
2905 : */
2906 16 : if (!current_is_member)
2907 12 : heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
2908 : LockWaitBlock, &have_tuple_lock);
2909 :
2910 : /* wait for multixact */
2911 16 : MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
2912 : relation, &(tp.t_self), XLTW_Delete,
2913 : NULL);
2914 16 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2915 :
2916 : /*
2917 : * If xwait had just locked the tuple then some other xact
2918 : * could update this tuple before we get to this point. Check
2919 : * for xmax change, and start over if so.
2920 : *
2921 : * We also must start over if we didn't pin the VM page, and
2922 : * the page has become all visible.
2923 : */
2924 32 : if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
2925 32 : xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2926 16 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
2927 : xwait))
2928 0 : goto l1;
2929 : }
2930 :
2931 : /*
2932 : * You might think the multixact is necessarily done here, but not
2933 : * so: it could have surviving members, namely our own xact or
2934 : * other subxacts of this backend. It is legal for us to delete
2935 : * the tuple in either case, however (the latter case is
2936 : * essentially a situation of upgrading our former shared lock to
2937 : * exclusive). We don't bother changing the on-disk hint bits
2938 : * since we are about to overwrite the xmax altogether.
2939 : */
2940 : }
2941 81064 : else if (!TransactionIdIsCurrentTransactionId(xwait))
2942 : {
2943 : /*
2944 : * Wait for regular transaction to end; but first, acquire tuple
2945 : * lock.
2946 : */
2947 100 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2948 100 : heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
2949 : LockWaitBlock, &have_tuple_lock);
2950 100 : XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
2951 92 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2952 :
2953 : /*
2954 : * xwait is done, but if xwait had just locked the tuple then some
2955 : * other xact could update this tuple before we get to this point.
2956 : * Check for xmax change, and start over if so.
2957 : *
2958 : * We also must start over if we didn't pin the VM page, and the
2959 : * page has become all visible.
2960 : */
2961 184 : if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
2962 182 : xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2963 90 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
2964 : xwait))
2965 2 : goto l1;
2966 :
2967 : /* Otherwise check if it committed or aborted */
2968 90 : UpdateXmaxHintBits(tp.t_data, buffer, xwait);
2969 : }
2970 :
2971 : /*
2972 : * We may overwrite if previous xmax aborted, or if it committed but
2973 : * only locked the tuple without updating it.
2974 : */
2975 162100 : if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2976 81088 : HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
2977 58 : HeapTupleHeaderIsOnlyLocked(tp.t_data))
2978 81020 : result = TM_Ok;
2979 50 : else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
2980 42 : result = TM_Updated;
2981 : else
2982 8 : result = TM_Deleted;
2983 : }
2984 :
2985 : /* sanity check the result HeapTupleSatisfiesUpdate() and the logic above */
2986 : if (result != TM_Ok)
2987 : {
2988 : Assert(result == TM_SelfModified ||
2989 : result == TM_Updated ||
2990 : result == TM_Deleted ||
2991 : result == TM_BeingModified);
2992 : Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
2993 : Assert(result != TM_Updated ||
2994 : !ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid));
2995 : }
2996 :
2997 2988596 : if (crosscheck != InvalidSnapshot && result == TM_Ok)
2998 : {
2999 : /* Perform additional check for transaction-snapshot mode RI updates */
3000 2 : if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
3001 2 : result = TM_Updated;
3002 : }
3003 :
3004 2988596 : if (result != TM_Ok)
3005 : {
3006 120 : tmfd->ctid = tp.t_data->t_ctid;
3007 120 : tmfd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
3008 120 : if (result == TM_SelfModified)
3009 42 : tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
3010 : else
3011 78 : tmfd->cmax = InvalidCommandId;
3012 120 : UnlockReleaseBuffer(buffer);
3013 120 : if (have_tuple_lock)
3014 50 : UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3015 120 : if (vmbuffer != InvalidBuffer)
3016 0 : ReleaseBuffer(vmbuffer);
3017 120 : return result;
3018 : }
3019 :
3020 : /*
3021 : * We're about to do the actual delete -- check for conflict first, to
3022 : * avoid possibly having to roll back work we've just done.
3023 : *
3024 : * This is safe without a recheck as long as there is no possibility of
3025 : * another process scanning the page between this check and the delete
3026 : * being visible to the scan (i.e., an exclusive buffer content lock is
3027 : * continuously held from this point until the tuple delete is visible).
3028 : */
3029 2988476 : CheckForSerializableConflictIn(relation, tid, BufferGetBlockNumber(buffer));
3030 :
3031 : /* replace cid with a combo CID if necessary */
3032 2988448 : HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
3033 :
3034 : /*
3035 : * Compute replica identity tuple before entering the critical section so
3036 : * we don't PANIC upon a memory allocation failure.
3037 : */
3038 2988448 : old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
3039 :
3040 : /*
3041 : * If this is the first possibly-multixact-able operation in the current
3042 : * transaction, set my per-backend OldestMemberMXactId setting. We can be
3043 : * certain that the transaction will never become a member of any older
3044 : * MultiXactIds than that. (We have to do this even if we end up just
3045 : * using our own TransactionId below, since some other backend could
3046 : * incorporate our XID into a MultiXact immediately afterwards.)
3047 : */
3048 2988448 : MultiXactIdSetOldestMember();
3049 :
3050 2988448 : compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
3051 2988448 : tp.t_data->t_infomask, tp.t_data->t_infomask2,
3052 : xid, LockTupleExclusive, true,
3053 : &new_xmax, &new_infomask, &new_infomask2);
3054 :
3055 2988448 : START_CRIT_SECTION();
3056 :
3057 : /*
3058 : * If this transaction commits, the tuple will become DEAD sooner or
3059 : * later. Set flag that this page is a candidate for pruning once our xid
3060 : * falls below the OldestXmin horizon. If the transaction finally aborts,
3061 : * the subsequent page pruning will be a no-op and the hint will be
3062 : * cleared.
3063 : */
3064 2988448 : PageSetPrunable(page, xid);
3065 :
3066 2988448 : if (PageIsAllVisible(page))
3067 : {
3068 414 : all_visible_cleared = true;
3069 414 : PageClearAllVisible(page);
3070 414 : visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3071 : vmbuffer, VISIBILITYMAP_VALID_BITS);
3072 : }
3073 :
3074 : /* store transaction information of xact deleting the tuple */
3075 2988448 : tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3076 2988448 : tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3077 2988448 : tp.t_data->t_infomask |= new_infomask;
3078 2988448 : tp.t_data->t_infomask2 |= new_infomask2;
3079 2988448 : HeapTupleHeaderClearHotUpdated(tp.t_data);
3080 2988448 : HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
3081 2988448 : HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
3082 : /* Make sure there is no forward chain link in t_ctid */
3083 2988448 : tp.t_data->t_ctid = tp.t_self;
3084 :
3085 : /* Signal that this is actually a move into another partition */
3086 2988448 : if (changingPart)
3087 968 : HeapTupleHeaderSetMovedPartitions(tp.t_data);
3088 :
3089 2988448 : MarkBufferDirty(buffer);
3090 :
3091 : /*
3092 : * XLOG stuff
3093 : *
3094 : * NB: heap_abort_speculative() uses the same xlog record and replay
3095 : * routines.
3096 : */
3097 2988448 : if (RelationNeedsWAL(relation))
3098 : {
3099 : xl_heap_delete xlrec;
3100 : xl_heap_header xlhdr;
3101 : XLogRecPtr recptr;
3102 :
3103 : /*
3104 : * For logical decode we need combo CIDs to properly decode the
3105 : * catalog
3106 : */
3107 2863210 : if (RelationIsAccessibleInLogicalDecoding(relation))
3108 12516 : log_heap_new_cid(relation, &tp);
3109 :
3110 2863210 : xlrec.flags = 0;
3111 2863210 : if (all_visible_cleared)
3112 414 : xlrec.flags |= XLH_DELETE_ALL_VISIBLE_CLEARED;
3113 2863210 : if (changingPart)
3114 968 : xlrec.flags |= XLH_DELETE_IS_PARTITION_MOVE;
3115 5726420 : xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
3116 2863210 : tp.t_data->t_infomask2);
3117 2863210 : xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
3118 2863210 : xlrec.xmax = new_xmax;
3119 :
3120 2863210 : if (old_key_tuple != NULL)
3121 : {
3122 94036 : if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3123 264 : xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE;
3124 : else
3125 93772 : xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY;
3126 : }
3127 :
3128 2863210 : XLogBeginInsert();
3129 2863210 : XLogRegisterData(&xlrec, SizeOfHeapDelete);
3130 :
3131 2863210 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3132 :
3133 : /*
3134 : * Log replica identity of the deleted tuple if there is one
3135 : */
3136 2863210 : if (old_key_tuple != NULL)
3137 : {
3138 94036 : xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
3139 94036 : xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
3140 94036 : xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
3141 :
3142 94036 : XLogRegisterData(&xlhdr, SizeOfHeapHeader);
3143 94036 : XLogRegisterData((char *) old_key_tuple->t_data
3144 : + SizeofHeapTupleHeader,
3145 94036 : old_key_tuple->t_len
3146 : - SizeofHeapTupleHeader);
3147 : }
3148 :
3149 : /* filtering by origin on a row level is much more efficient */
3150 2863210 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
3151 :
3152 2863210 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
3153 :
3154 2863210 : PageSetLSN(page, recptr);
3155 : }
3156 :
3157 2988448 : END_CRIT_SECTION();
3158 :
3159 2988448 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3160 :
3161 2988448 : if (vmbuffer != InvalidBuffer)
3162 414 : ReleaseBuffer(vmbuffer);
3163 :
3164 : /*
3165 : * If the tuple has toasted out-of-line attributes, we need to delete
3166 : * those items too. We have to do this before releasing the buffer
3167 : * because we need to look at the contents of the tuple, but it's OK to
3168 : * release the content lock on the buffer first.
3169 : */
3170 2988448 : if (relation->rd_rel->relkind != RELKIND_RELATION &&
3171 5166 : relation->rd_rel->relkind != RELKIND_MATVIEW)
3172 : {
3173 : /* toast table entries should never be recursively toasted */
3174 : Assert(!HeapTupleHasExternal(&tp));
3175 : }
3176 2983302 : else if (HeapTupleHasExternal(&tp))
3177 598 : heap_toast_delete(relation, &tp, false);
3178 :
3179 : /*
3180 : * Mark tuple for invalidation from system caches at next command
3181 : * boundary. We have to do this before releasing the buffer because we
3182 : * need to look at the contents of the tuple.
3183 : */
3184 2988448 : CacheInvalidateHeapTuple(relation, &tp, NULL);
3185 :
3186 : /* Now we can release the buffer */
3187 2988448 : ReleaseBuffer(buffer);
3188 :
3189 : /*
3190 : * Release the lmgr tuple lock, if we had it.
3191 : */
3192 2988448 : if (have_tuple_lock)
3193 52 : UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3194 :
3195 2988448 : pgstat_count_heap_delete(relation);
3196 :
3197 2988448 : if (old_key_tuple != NULL && old_key_copied)
3198 93774 : heap_freetuple(old_key_tuple);
3199 :
3200 2988448 : return TM_Ok;
3201 : }
3202 :
3203 : /*
3204 : * simple_heap_delete - delete a tuple
3205 : *
3206 : * This routine may be used to delete a tuple when concurrent updates of
3207 : * the target tuple are not expected (for example, because we have a lock
3208 : * on the relation associated with the tuple). Any failure is reported
3209 : * via ereport().
3210 : */
3211 : void
3212 1256742 : simple_heap_delete(Relation relation, ItemPointer tid)
3213 : {
3214 : TM_Result result;
3215 : TM_FailureData tmfd;
3216 :
3217 1256742 : result = heap_delete(relation, tid,
3218 : GetCurrentCommandId(true), InvalidSnapshot,
3219 : true /* wait for commit */ ,
3220 : &tmfd, false /* changingPart */ );
3221 1256742 : switch (result)
3222 : {
3223 0 : case TM_SelfModified:
3224 : /* Tuple was already updated in current command? */
3225 0 : elog(ERROR, "tuple already updated by self");
3226 : break;
3227 :
3228 1256742 : case TM_Ok:
3229 : /* done successfully */
3230 1256742 : break;
3231 :
3232 0 : case TM_Updated:
3233 0 : elog(ERROR, "tuple concurrently updated");
3234 : break;
3235 :
3236 0 : case TM_Deleted:
3237 0 : elog(ERROR, "tuple concurrently deleted");
3238 : break;
3239 :
3240 0 : default:
3241 0 : elog(ERROR, "unrecognized heap_delete status: %u", result);
3242 : break;
3243 : }
3244 1256742 : }
3245 :
3246 : /*
3247 : * heap_update - replace a tuple
3248 : *
3249 : * See table_tuple_update() for an explanation of the parameters, except that
3250 : * this routine directly takes a tuple rather than a slot.
3251 : *
3252 : * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
3253 : * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
3254 : * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
3255 : * generated by another transaction).
3256 : */
3257 : TM_Result
3258 609430 : heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
3259 : CommandId cid, Snapshot crosscheck, bool wait,
3260 : TM_FailureData *tmfd, LockTupleMode *lockmode,
3261 : TU_UpdateIndexes *update_indexes)
3262 : {
3263 : TM_Result result;
3264 609430 : TransactionId xid = GetCurrentTransactionId();
3265 : Bitmapset *hot_attrs;
3266 : Bitmapset *sum_attrs;
3267 : Bitmapset *key_attrs;
3268 : Bitmapset *id_attrs;
3269 : Bitmapset *interesting_attrs;
3270 : Bitmapset *modified_attrs;
3271 : ItemId lp;
3272 : HeapTupleData oldtup;
3273 : HeapTuple heaptup;
3274 609430 : HeapTuple old_key_tuple = NULL;
3275 609430 : bool old_key_copied = false;
3276 : Page page;
3277 : BlockNumber block;
3278 : MultiXactStatus mxact_status;
3279 : Buffer buffer,
3280 : newbuf,
3281 609430 : vmbuffer = InvalidBuffer,
3282 609430 : vmbuffer_new = InvalidBuffer;
3283 : bool need_toast;
3284 : Size newtupsize,
3285 : pagefree;
3286 609430 : bool have_tuple_lock = false;
3287 : bool iscombo;
3288 609430 : bool use_hot_update = false;
3289 609430 : bool summarized_update = false;
3290 : bool key_intact;
3291 609430 : bool all_visible_cleared = false;
3292 609430 : bool all_visible_cleared_new = false;
3293 : bool checked_lockers;
3294 : bool locker_remains;
3295 609430 : bool id_has_external = false;
3296 : TransactionId xmax_new_tuple,
3297 : xmax_old_tuple;
3298 : uint16 infomask_old_tuple,
3299 : infomask2_old_tuple,
3300 : infomask_new_tuple,
3301 : infomask2_new_tuple;
3302 :
3303 : Assert(ItemPointerIsValid(otid));
3304 :
3305 : /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
3306 : Assert(HeapTupleHeaderGetNatts(newtup->t_data) <=
3307 : RelationGetNumberOfAttributes(relation));
3308 :
3309 609430 : AssertHasSnapshotForToast(relation);
3310 :
3311 : /*
3312 : * Forbid this during a parallel operation, lest it allocate a combo CID.
3313 : * Other workers might need that combo CID for visibility checks, and we
3314 : * have no provision for broadcasting it to them.
3315 : */
3316 609430 : if (IsInParallelMode())
3317 0 : ereport(ERROR,
3318 : (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
3319 : errmsg("cannot update tuples during a parallel operation")));
3320 :
3321 : #ifdef USE_ASSERT_CHECKING
3322 : check_lock_if_inplace_updateable_rel(relation, otid, newtup);
3323 : #endif
3324 :
3325 : /*
3326 : * Fetch the list of attributes to be checked for various operations.
3327 : *
3328 : * For HOT considerations, this is wasted effort if we fail to update or
3329 : * have to put the new tuple on a different page. But we must compute the
3330 : * list before obtaining buffer lock --- in the worst case, if we are
3331 : * doing an update on one of the relevant system catalogs, we could
3332 : * deadlock if we try to fetch the list later. In any case, the relcache
3333 : * caches the data so this is usually pretty cheap.
3334 : *
3335 : * We also need columns used by the replica identity and columns that are
3336 : * considered the "key" of rows in the table.
3337 : *
3338 : * Note that we get copies of each bitmap, so we need not worry about
3339 : * relcache flush happening midway through.
3340 : */
3341 609430 : hot_attrs = RelationGetIndexAttrBitmap(relation,
3342 : INDEX_ATTR_BITMAP_HOT_BLOCKING);
3343 609430 : sum_attrs = RelationGetIndexAttrBitmap(relation,
3344 : INDEX_ATTR_BITMAP_SUMMARIZED);
3345 609430 : key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY);
3346 609430 : id_attrs = RelationGetIndexAttrBitmap(relation,
3347 : INDEX_ATTR_BITMAP_IDENTITY_KEY);
3348 609430 : interesting_attrs = NULL;
3349 609430 : interesting_attrs = bms_add_members(interesting_attrs, hot_attrs);
3350 609430 : interesting_attrs = bms_add_members(interesting_attrs, sum_attrs);
3351 609430 : interesting_attrs = bms_add_members(interesting_attrs, key_attrs);
3352 609430 : interesting_attrs = bms_add_members(interesting_attrs, id_attrs);
3353 :
3354 609430 : block = ItemPointerGetBlockNumber(otid);
3355 609430 : INJECTION_POINT("heap_update-before-pin", NULL);
3356 609430 : buffer = ReadBuffer(relation, block);
3357 609430 : page = BufferGetPage(buffer);
3358 :
3359 : /*
3360 : * Before locking the buffer, pin the visibility map page if it appears to
3361 : * be necessary. Since we haven't got the lock yet, someone else might be
3362 : * in the middle of changing this, so we'll need to recheck after we have
3363 : * the lock.
3364 : */
3365 609430 : if (PageIsAllVisible(page))
3366 3372 : visibilitymap_pin(relation, block, &vmbuffer);
3367 :
3368 609430 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3369 :
3370 609430 : lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
3371 :
3372 : /*
3373 : * Usually, a buffer pin and/or snapshot blocks pruning of otid, ensuring
3374 : * we see LP_NORMAL here. When the otid origin is a syscache, we may have
3375 : * neither a pin nor a snapshot. Hence, we may see other LP_ states, each
3376 : * of which indicates concurrent pruning.
3377 : *
3378 : * Failing with TM_Updated would be most accurate. However, unlike other
3379 : * TM_Updated scenarios, we don't know the successor ctid in LP_UNUSED and
3380 : * LP_DEAD cases. While the distinction between TM_Updated and TM_Deleted
3381 : * does matter to SQL statements UPDATE and MERGE, those SQL statements
3382 : * hold a snapshot that ensures LP_NORMAL. Hence, the choice between
3383 : * TM_Updated and TM_Deleted affects only the wording of error messages.
3384 : * Settle on TM_Deleted, for two reasons. First, it avoids complicating
3385 : * the specification of when tmfd->ctid is valid. Second, it creates
3386 : * error log evidence that we took this branch.
3387 : *
3388 : * Since it's possible to see LP_UNUSED at otid, it's also possible to see
3389 : * LP_NORMAL for a tuple that replaced LP_UNUSED. If it's a tuple for an
3390 : * unrelated row, we'll fail with "duplicate key value violates unique".
3391 : * XXX if otid is the live, newer version of the newtup row, we'll discard
3392 : * changes originating in versions of this catalog row after the version
3393 : * the caller got from syscache. See syscache-update-pruned.spec.
3394 : */
3395 609430 : if (!ItemIdIsNormal(lp))
3396 : {
3397 : Assert(RelationSupportsSysCache(RelationGetRelid(relation)));
3398 :
3399 2 : UnlockReleaseBuffer(buffer);
3400 : Assert(!have_tuple_lock);
3401 2 : if (vmbuffer != InvalidBuffer)
3402 2 : ReleaseBuffer(vmbuffer);
3403 2 : tmfd->ctid = *otid;
3404 2 : tmfd->xmax = InvalidTransactionId;
3405 2 : tmfd->cmax = InvalidCommandId;
3406 2 : *update_indexes = TU_None;
3407 :
3408 2 : bms_free(hot_attrs);
3409 2 : bms_free(sum_attrs);
3410 2 : bms_free(key_attrs);
3411 2 : bms_free(id_attrs);
3412 : /* modified_attrs not yet initialized */
3413 2 : bms_free(interesting_attrs);
3414 2 : return TM_Deleted;
3415 : }
3416 :
3417 : /*
3418 : * Fill in enough data in oldtup for HeapDetermineColumnsInfo to work
3419 : * properly.
3420 : */
3421 609428 : oldtup.t_tableOid = RelationGetRelid(relation);
3422 609428 : oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3423 609428 : oldtup.t_len = ItemIdGetLength(lp);
3424 609428 : oldtup.t_self = *otid;
3425 :
3426 : /* the new tuple is ready, except for this: */
3427 609428 : newtup->t_tableOid = RelationGetRelid(relation);
3428 :
3429 : /*
3430 : * Determine columns modified by the update. Additionally, identify
3431 : * whether any of the unmodified replica identity key attributes in the
3432 : * old tuple is externally stored or not. This is required because for
3433 : * such attributes the flattened value won't be WAL logged as part of the
3434 : * new tuple so we must include it as part of the old_key_tuple. See
3435 : * ExtractReplicaIdentity.
3436 : */
3437 609428 : modified_attrs = HeapDetermineColumnsInfo(relation, interesting_attrs,
3438 : id_attrs, &oldtup,
3439 : newtup, &id_has_external);
3440 :
3441 : /*
3442 : * If we're not updating any "key" column, we can grab a weaker lock type.
3443 : * This allows for more concurrency when we are running simultaneously
3444 : * with foreign key checks.
3445 : *
3446 : * Note that if a column gets detoasted while executing the update, but
3447 : * the value ends up being the same, this test will fail and we will use
3448 : * the stronger lock. This is acceptable; the important case to optimize
3449 : * is updates that don't manipulate key columns, not those that
3450 : * serendipitously arrive at the same key values.
3451 : */
3452 609428 : if (!bms_overlap(modified_attrs, key_attrs))
3453 : {
3454 601132 : *lockmode = LockTupleNoKeyExclusive;
3455 601132 : mxact_status = MultiXactStatusNoKeyUpdate;
3456 601132 : key_intact = true;
3457 :
3458 : /*
3459 : * If this is the first possibly-multixact-able operation in the
3460 : * current transaction, set my per-backend OldestMemberMXactId
3461 : * setting. We can be certain that the transaction will never become a
3462 : * member of any older MultiXactIds than that. (We have to do this
3463 : * even if we end up just using our own TransactionId below, since
3464 : * some other backend could incorporate our XID into a MultiXact
3465 : * immediately afterwards.)
3466 : */
3467 601132 : MultiXactIdSetOldestMember();
3468 : }
3469 : else
3470 : {
3471 8296 : *lockmode = LockTupleExclusive;
3472 8296 : mxact_status = MultiXactStatusUpdate;
3473 8296 : key_intact = false;
3474 : }
3475 :
3476 : /*
3477 : * Note: beyond this point, use oldtup not otid to refer to old tuple.
3478 : * otid may very well point at newtup->t_self, which we will overwrite
3479 : * with the new tuple's location, so there's great risk of confusion if we
3480 : * use otid anymore.
3481 : */
3482 :
3483 609428 : l2:
3484 609430 : checked_lockers = false;
3485 609430 : locker_remains = false;
3486 609430 : result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
3487 :
3488 : /* see below about the "no wait" case */
3489 : Assert(result != TM_BeingModified || wait);
3490 :
3491 609430 : if (result == TM_Invisible)
3492 : {
3493 0 : UnlockReleaseBuffer(buffer);
3494 0 : ereport(ERROR,
3495 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
3496 : errmsg("attempted to update invisible tuple")));
3497 : }
3498 609430 : else if (result == TM_BeingModified && wait)
3499 : {
3500 : TransactionId xwait;
3501 : uint16 infomask;
3502 71880 : bool can_continue = false;
3503 :
3504 : /*
3505 : * XXX note that we don't consider the "no wait" case here. This
3506 : * isn't a problem currently because no caller uses that case, but it
3507 : * should be fixed if such a caller is introduced. It wasn't a
3508 : * problem previously because this code would always wait, but now
3509 : * that some tuple locks do not conflict with one of the lock modes we
3510 : * use, it is possible that this case is interesting to handle
3511 : * specially.
3512 : *
3513 : * This may cause failures with third-party code that calls
3514 : * heap_update directly.
3515 : */
3516 :
3517 : /* must copy state data before unlocking buffer */
3518 71880 : xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
3519 71880 : infomask = oldtup.t_data->t_infomask;
3520 :
3521 : /*
3522 : * Now we have to do something about the existing locker. If it's a
3523 : * multi, sleep on it; we might be awakened before it is completely
3524 : * gone (or even not sleep at all in some cases); we need to preserve
3525 : * it as locker, unless it is gone completely.
3526 : *
3527 : * If it's not a multi, we need to check for sleeping conditions
3528 : * before actually going to sleep. If the update doesn't conflict
3529 : * with the locks, we just continue without sleeping (but making sure
3530 : * it is preserved).
3531 : *
3532 : * Before sleeping, we need to acquire tuple lock to establish our
3533 : * priority for the tuple (see heap_lock_tuple). LockTuple will
3534 : * release us when we are next-in-line for the tuple. Note we must
3535 : * not acquire the tuple lock until we're sure we're going to sleep;
3536 : * otherwise we're open for race conditions with other transactions
3537 : * holding the tuple lock which sleep on us.
3538 : *
3539 : * If we are forced to "start over" below, we keep the tuple lock;
3540 : * this arranges that we stay at the head of the line while rechecking
3541 : * tuple state.
3542 : */
3543 71880 : if (infomask & HEAP_XMAX_IS_MULTI)
3544 : {
3545 : TransactionId update_xact;
3546 : int remain;
3547 120 : bool current_is_member = false;
3548 :
3549 120 : if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
3550 : *lockmode, ¤t_is_member))
3551 : {
3552 16 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3553 :
3554 : /*
3555 : * Acquire the lock, if necessary (but skip it when we're
3556 : * requesting a lock and already have one; avoids deadlock).
3557 : */
3558 16 : if (!current_is_member)
3559 0 : heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3560 : LockWaitBlock, &have_tuple_lock);
3561 :
3562 : /* wait for multixact */
3563 16 : MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
3564 : relation, &oldtup.t_self, XLTW_Update,
3565 : &remain);
3566 16 : checked_lockers = true;
3567 16 : locker_remains = remain != 0;
3568 16 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3569 :
3570 : /*
3571 : * If xwait had just locked the tuple then some other xact
3572 : * could update this tuple before we get to this point. Check
3573 : * for xmax change, and start over if so.
3574 : */
3575 16 : if (xmax_infomask_changed(oldtup.t_data->t_infomask,
3576 16 : infomask) ||
3577 16 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3578 : xwait))
3579 0 : goto l2;
3580 : }
3581 :
3582 : /*
3583 : * Note that the multixact may not be done by now. It could have
3584 : * surviving members; our own xact or other subxacts of this
3585 : * backend, and also any other concurrent transaction that locked
3586 : * the tuple with LockTupleKeyShare if we only got
3587 : * LockTupleNoKeyExclusive. If this is the case, we have to be
3588 : * careful to mark the updated tuple with the surviving members in
3589 : * Xmax.
3590 : *
3591 : * Note that there could have been another update in the
3592 : * MultiXact. In that case, we need to check whether it committed
3593 : * or aborted. If it aborted we are safe to update it again;
3594 : * otherwise there is an update conflict, and we have to return
3595 : * TableTuple{Deleted, Updated} below.
3596 : *
3597 : * In the LockTupleExclusive case, we still need to preserve the
3598 : * surviving members: those would include the tuple locks we had
3599 : * before this one, which are important to keep in case this
3600 : * subxact aborts.
3601 : */
3602 120 : if (!HEAP_XMAX_IS_LOCKED_ONLY(oldtup.t_data->t_infomask))
3603 16 : update_xact = HeapTupleGetUpdateXid(oldtup.t_data);
3604 : else
3605 104 : update_xact = InvalidTransactionId;
3606 :
3607 : /*
3608 : * There was no UPDATE in the MultiXact; or it aborted. No
3609 : * TransactionIdIsInProgress() call needed here, since we called
3610 : * MultiXactIdWait() above.
3611 : */
3612 136 : if (!TransactionIdIsValid(update_xact) ||
3613 16 : TransactionIdDidAbort(update_xact))
3614 106 : can_continue = true;
3615 : }
3616 71760 : else if (TransactionIdIsCurrentTransactionId(xwait))
3617 : {
3618 : /*
3619 : * The only locker is ourselves; we can avoid grabbing the tuple
3620 : * lock here, but must preserve our locking information.
3621 : */
3622 71548 : checked_lockers = true;
3623 71548 : locker_remains = true;
3624 71548 : can_continue = true;
3625 : }
3626 212 : else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
3627 : {
3628 : /*
3629 : * If it's just a key-share locker, and we're not changing the key
3630 : * columns, we don't need to wait for it to end; but we need to
3631 : * preserve it as locker.
3632 : */
3633 58 : checked_lockers = true;
3634 58 : locker_remains = true;
3635 58 : can_continue = true;
3636 : }
3637 : else
3638 : {
3639 : /*
3640 : * Wait for regular transaction to end; but first, acquire tuple
3641 : * lock.
3642 : */
3643 154 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3644 154 : heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3645 : LockWaitBlock, &have_tuple_lock);
3646 154 : XactLockTableWait(xwait, relation, &oldtup.t_self,
3647 : XLTW_Update);
3648 154 : checked_lockers = true;
3649 154 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3650 :
3651 : /*
3652 : * xwait is done, but if xwait had just locked the tuple then some
3653 : * other xact could update this tuple before we get to this point.
3654 : * Check for xmax change, and start over if so.
3655 : */
3656 306 : if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
3657 152 : !TransactionIdEquals(xwait,
3658 : HeapTupleHeaderGetRawXmax(oldtup.t_data)))
3659 2 : goto l2;
3660 :
3661 : /* Otherwise check if it committed or aborted */
3662 152 : UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
3663 152 : if (oldtup.t_data->t_infomask & HEAP_XMAX_INVALID)
3664 44 : can_continue = true;
3665 : }
3666 :
3667 71878 : if (can_continue)
3668 71756 : result = TM_Ok;
3669 122 : else if (!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid))
3670 112 : result = TM_Updated;
3671 : else
3672 10 : result = TM_Deleted;
3673 : }
3674 :
3675 : /* Sanity check the result HeapTupleSatisfiesUpdate() and the logic above */
3676 : if (result != TM_Ok)
3677 : {
3678 : Assert(result == TM_SelfModified ||
3679 : result == TM_Updated ||
3680 : result == TM_Deleted ||
3681 : result == TM_BeingModified);
3682 : Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
3683 : Assert(result != TM_Updated ||
3684 : !ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
3685 : }
3686 :
3687 609428 : if (crosscheck != InvalidSnapshot && result == TM_Ok)
3688 : {
3689 : /* Perform additional check for transaction-snapshot mode RI updates */
3690 2 : if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
3691 2 : result = TM_Updated;
3692 : }
3693 :
3694 609428 : if (result != TM_Ok)
3695 : {
3696 316 : tmfd->ctid = oldtup.t_data->t_ctid;
3697 316 : tmfd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
3698 316 : if (result == TM_SelfModified)
3699 104 : tmfd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
3700 : else
3701 212 : tmfd->cmax = InvalidCommandId;
3702 316 : UnlockReleaseBuffer(buffer);
3703 316 : if (have_tuple_lock)
3704 108 : UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
3705 316 : if (vmbuffer != InvalidBuffer)
3706 0 : ReleaseBuffer(vmbuffer);
3707 316 : *update_indexes = TU_None;
3708 :
3709 316 : bms_free(hot_attrs);
3710 316 : bms_free(sum_attrs);
3711 316 : bms_free(key_attrs);
3712 316 : bms_free(id_attrs);
3713 316 : bms_free(modified_attrs);
3714 316 : bms_free(interesting_attrs);
3715 316 : return result;
3716 : }
3717 :
3718 : /*
3719 : * If we didn't pin the visibility map page and the page has become all
3720 : * visible while we were busy locking the buffer, or during some
3721 : * subsequent window during which we had it unlocked, we'll have to unlock
3722 : * and re-lock, to avoid holding the buffer lock across an I/O. That's a
3723 : * bit unfortunate, especially since we'll now have to recheck whether the
3724 : * tuple has been locked or updated under us, but hopefully it won't
3725 : * happen very often.
3726 : */
3727 609112 : if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3728 : {
3729 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3730 0 : visibilitymap_pin(relation, block, &vmbuffer);
3731 0 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3732 0 : goto l2;
3733 : }
3734 :
3735 : /* Fill in transaction status data */
3736 :
3737 : /*
3738 : * If the tuple we're updating is locked, we need to preserve the locking
3739 : * info in the old tuple's Xmax. Prepare a new Xmax value for this.
3740 : */
3741 609112 : compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3742 609112 : oldtup.t_data->t_infomask,
3743 609112 : oldtup.t_data->t_infomask2,
3744 : xid, *lockmode, true,
3745 : &xmax_old_tuple, &infomask_old_tuple,
3746 : &infomask2_old_tuple);
3747 :
3748 : /*
3749 : * And also prepare an Xmax value for the new copy of the tuple. If there
3750 : * was no xmax previously, or there was one but all lockers are now gone,
3751 : * then use InvalidTransactionId; otherwise, get the xmax from the old
3752 : * tuple. (In rare cases that might also be InvalidTransactionId and yet
3753 : * not have the HEAP_XMAX_INVALID bit set; that's fine.)
3754 : */
3755 680824 : if ((oldtup.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3756 143424 : HEAP_LOCKED_UPGRADED(oldtup.t_data->t_infomask) ||
3757 71608 : (checked_lockers && !locker_remains))
3758 537400 : xmax_new_tuple = InvalidTransactionId;
3759 : else
3760 71712 : xmax_new_tuple = HeapTupleHeaderGetRawXmax(oldtup.t_data);
3761 :
3762 609112 : if (!TransactionIdIsValid(xmax_new_tuple))
3763 : {
3764 537400 : infomask_new_tuple = HEAP_XMAX_INVALID;
3765 537400 : infomask2_new_tuple = 0;
3766 : }
3767 : else
3768 : {
3769 : /*
3770 : * If we found a valid Xmax for the new tuple, then the infomask bits
3771 : * to use on the new tuple depend on what was there on the old one.
3772 : * Note that since we're doing an update, the only possibility is that
3773 : * the lockers had FOR KEY SHARE lock.
3774 : */
3775 71712 : if (oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI)
3776 : {
3777 106 : GetMultiXactIdHintBits(xmax_new_tuple, &infomask_new_tuple,
3778 : &infomask2_new_tuple);
3779 : }
3780 : else
3781 : {
3782 71606 : infomask_new_tuple = HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_LOCK_ONLY;
3783 71606 : infomask2_new_tuple = 0;
3784 : }
3785 : }
3786 :
3787 : /*
3788 : * Prepare the new tuple with the appropriate initial values of Xmin and
3789 : * Xmax, as well as initial infomask bits as computed above.
3790 : */
3791 609112 : newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
3792 609112 : newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
3793 609112 : HeapTupleHeaderSetXmin(newtup->t_data, xid);
3794 609112 : HeapTupleHeaderSetCmin(newtup->t_data, cid);
3795 609112 : newtup->t_data->t_infomask |= HEAP_UPDATED | infomask_new_tuple;
3796 609112 : newtup->t_data->t_infomask2 |= infomask2_new_tuple;
3797 609112 : HeapTupleHeaderSetXmax(newtup->t_data, xmax_new_tuple);
3798 :
3799 : /*
3800 : * Replace cid with a combo CID if necessary. Note that we already put
3801 : * the plain cid into the new tuple.
3802 : */
3803 609112 : HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);
3804 :
3805 : /*
3806 : * If the toaster needs to be activated, OR if the new tuple will not fit
3807 : * on the same page as the old, then we need to release the content lock
3808 : * (but not the pin!) on the old tuple's buffer while we are off doing
3809 : * TOAST and/or table-file-extension work. We must mark the old tuple to
3810 : * show that it's locked, else other processes may try to update it
3811 : * themselves.
3812 : *
3813 : * We need to invoke the toaster if there are already any out-of-line
3814 : * toasted values present, or if the new tuple is over-threshold.
3815 : */
3816 609112 : if (relation->rd_rel->relkind != RELKIND_RELATION &&
3817 0 : relation->rd_rel->relkind != RELKIND_MATVIEW)
3818 : {
3819 : /* toast table entries should never be recursively toasted */
3820 : Assert(!HeapTupleHasExternal(&oldtup));
3821 : Assert(!HeapTupleHasExternal(newtup));
3822 0 : need_toast = false;
3823 : }
3824 : else
3825 1826578 : need_toast = (HeapTupleHasExternal(&oldtup) ||
3826 1217466 : HeapTupleHasExternal(newtup) ||
3827 608306 : newtup->t_len > TOAST_TUPLE_THRESHOLD);
3828 :
3829 609112 : pagefree = PageGetHeapFreeSpace(page);
3830 :
3831 609112 : newtupsize = MAXALIGN(newtup->t_len);
3832 :
3833 609112 : if (need_toast || newtupsize > pagefree)
3834 300444 : {
3835 : TransactionId xmax_lock_old_tuple;
3836 : uint16 infomask_lock_old_tuple,
3837 : infomask2_lock_old_tuple;
3838 300444 : bool cleared_all_frozen = false;
3839 :
3840 : /*
3841 : * To prevent concurrent sessions from updating the tuple, we have to
3842 : * temporarily mark it locked, while we release the page-level lock.
3843 : *
3844 : * To satisfy the rule that any xid potentially appearing in a buffer
3845 : * written out to disk, we unfortunately have to WAL log this
3846 : * temporary modification. We can reuse xl_heap_lock for this
3847 : * purpose. If we crash/error before following through with the
3848 : * actual update, xmax will be of an aborted transaction, allowing
3849 : * other sessions to proceed.
3850 : */
3851 :
3852 : /*
3853 : * Compute xmax / infomask appropriate for locking the tuple. This has
3854 : * to be done separately from the combo that's going to be used for
3855 : * updating, because the potentially created multixact would otherwise
3856 : * be wrong.
3857 : */
3858 300444 : compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3859 300444 : oldtup.t_data->t_infomask,
3860 300444 : oldtup.t_data->t_infomask2,
3861 : xid, *lockmode, false,
3862 : &xmax_lock_old_tuple, &infomask_lock_old_tuple,
3863 : &infomask2_lock_old_tuple);
3864 :
3865 : Assert(HEAP_XMAX_IS_LOCKED_ONLY(infomask_lock_old_tuple));
3866 :
3867 300444 : START_CRIT_SECTION();
3868 :
3869 : /* Clear obsolete visibility flags ... */
3870 300444 : oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3871 300444 : oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3872 300444 : HeapTupleClearHotUpdated(&oldtup);
3873 : /* ... and store info about transaction updating this tuple */
3874 : Assert(TransactionIdIsValid(xmax_lock_old_tuple));
3875 300444 : HeapTupleHeaderSetXmax(oldtup.t_data, xmax_lock_old_tuple);
3876 300444 : oldtup.t_data->t_infomask |= infomask_lock_old_tuple;
3877 300444 : oldtup.t_data->t_infomask2 |= infomask2_lock_old_tuple;
3878 300444 : HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
3879 :
3880 : /* temporarily make it look not-updated, but locked */
3881 300444 : oldtup.t_data->t_ctid = oldtup.t_self;
3882 :
3883 : /*
3884 : * Clear all-frozen bit on visibility map if needed. We could
3885 : * immediately reset ALL_VISIBLE, but given that the WAL logging
3886 : * overhead would be unchanged, that doesn't seem necessarily
3887 : * worthwhile.
3888 : */
3889 302264 : if (PageIsAllVisible(page) &&
3890 1820 : visibilitymap_clear(relation, block, vmbuffer,
3891 : VISIBILITYMAP_ALL_FROZEN))
3892 1482 : cleared_all_frozen = true;
3893 :
3894 300444 : MarkBufferDirty(buffer);
3895 :
3896 300444 : if (RelationNeedsWAL(relation))
3897 : {
3898 : xl_heap_lock xlrec;
3899 : XLogRecPtr recptr;
3900 :
3901 280186 : XLogBeginInsert();
3902 280186 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3903 :
3904 280186 : xlrec.offnum = ItemPointerGetOffsetNumber(&oldtup.t_self);
3905 280186 : xlrec.xmax = xmax_lock_old_tuple;
3906 560372 : xlrec.infobits_set = compute_infobits(oldtup.t_data->t_infomask,
3907 280186 : oldtup.t_data->t_infomask2);
3908 280186 : xlrec.flags =
3909 280186 : cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
3910 280186 : XLogRegisterData(&xlrec, SizeOfHeapLock);
3911 280186 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
3912 280186 : PageSetLSN(page, recptr);
3913 : }
3914 :
3915 300444 : END_CRIT_SECTION();
3916 :
3917 300444 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3918 :
3919 : /*
3920 : * Let the toaster do its thing, if needed.
3921 : *
3922 : * Note: below this point, heaptup is the data we actually intend to
3923 : * store into the relation; newtup is the caller's original untoasted
3924 : * data.
3925 : */
3926 300444 : if (need_toast)
3927 : {
3928 : /* Note we always use WAL and FSM during updates */
3929 3310 : heaptup = heap_toast_insert_or_update(relation, newtup, &oldtup, 0);
3930 3310 : newtupsize = MAXALIGN(heaptup->t_len);
3931 : }
3932 : else
3933 297134 : heaptup = newtup;
3934 :
3935 : /*
3936 : * Now, do we need a new page for the tuple, or not? This is a bit
3937 : * tricky since someone else could have added tuples to the page while
3938 : * we weren't looking. We have to recheck the available space after
3939 : * reacquiring the buffer lock. But don't bother to do that if the
3940 : * former amount of free space is still not enough; it's unlikely
3941 : * there's more free now than before.
3942 : *
3943 : * What's more, if we need to get a new page, we will need to acquire
3944 : * buffer locks on both old and new pages. To avoid deadlock against
3945 : * some other backend trying to get the same two locks in the other
3946 : * order, we must be consistent about the order we get the locks in.
3947 : * We use the rule "lock the lower-numbered page of the relation
3948 : * first". To implement this, we must do RelationGetBufferForTuple
3949 : * while not holding the lock on the old page, and we must rely on it
3950 : * to get the locks on both pages in the correct order.
3951 : *
3952 : * Another consideration is that we need visibility map page pin(s) if
3953 : * we will have to clear the all-visible flag on either page. If we
3954 : * call RelationGetBufferForTuple, we rely on it to acquire any such
3955 : * pins; but if we don't, we have to handle that here. Hence we need
3956 : * a loop.
3957 : */
3958 : for (;;)
3959 : {
3960 300444 : if (newtupsize > pagefree)
3961 : {
3962 : /* It doesn't fit, must use RelationGetBufferForTuple. */
3963 299338 : newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
3964 : buffer, 0, NULL,
3965 : &vmbuffer_new, &vmbuffer,
3966 : 0);
3967 : /* We're all done. */
3968 299338 : break;
3969 : }
3970 : /* Acquire VM page pin if needed and we don't have it. */
3971 1106 : if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3972 0 : visibilitymap_pin(relation, block, &vmbuffer);
3973 : /* Re-acquire the lock on the old tuple's page. */
3974 1106 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3975 : /* Re-check using the up-to-date free space */
3976 1106 : pagefree = PageGetHeapFreeSpace(page);
3977 1106 : if (newtupsize > pagefree ||
3978 1106 : (vmbuffer == InvalidBuffer && PageIsAllVisible(page)))
3979 : {
3980 : /*
3981 : * Rats, it doesn't fit anymore, or somebody just now set the
3982 : * all-visible flag. We must now unlock and loop to avoid
3983 : * deadlock. Fortunately, this path should seldom be taken.
3984 : */
3985 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3986 : }
3987 : else
3988 : {
3989 : /* We're all done. */
3990 1106 : newbuf = buffer;
3991 1106 : break;
3992 : }
3993 : }
3994 : }
3995 : else
3996 : {
3997 : /* No TOAST work needed, and it'll fit on same page */
3998 308668 : newbuf = buffer;
3999 308668 : heaptup = newtup;
4000 : }
4001 :
4002 : /*
4003 : * We're about to do the actual update -- check for conflict first, to
4004 : * avoid possibly having to roll back work we've just done.
4005 : *
4006 : * This is safe without a recheck as long as there is no possibility of
4007 : * another process scanning the pages between this check and the update
4008 : * being visible to the scan (i.e., exclusive buffer content lock(s) are
4009 : * continuously held from this point until the tuple update is visible).
4010 : *
4011 : * For the new tuple the only check needed is at the relation level, but
4012 : * since both tuples are in the same relation and the check for oldtup
4013 : * will include checking the relation level, there is no benefit to a
4014 : * separate check for the new tuple.
4015 : */
4016 609112 : CheckForSerializableConflictIn(relation, &oldtup.t_self,
4017 : BufferGetBlockNumber(buffer));
4018 :
4019 : /*
4020 : * At this point newbuf and buffer are both pinned and locked, and newbuf
4021 : * has enough space for the new tuple. If they are the same buffer, only
4022 : * one pin is held.
4023 : */
4024 :
4025 609088 : if (newbuf == buffer)
4026 : {
4027 : /*
4028 : * Since the new tuple is going into the same page, we might be able
4029 : * to do a HOT update. Check if any of the index columns have been
4030 : * changed.
4031 : */
4032 309750 : if (!bms_overlap(modified_attrs, hot_attrs))
4033 : {
4034 285932 : use_hot_update = true;
4035 :
4036 : /*
4037 : * If none of the columns that are used in hot-blocking indexes
4038 : * were updated, we can apply HOT, but we do still need to check
4039 : * if we need to update the summarizing indexes, and update those
4040 : * indexes if the columns were updated, or we may fail to detect
4041 : * e.g. value bound changes in BRIN minmax indexes.
4042 : */
4043 285932 : if (bms_overlap(modified_attrs, sum_attrs))
4044 3282 : summarized_update = true;
4045 : }
4046 : }
4047 : else
4048 : {
4049 : /* Set a hint that the old page could use prune/defrag */
4050 299338 : PageSetFull(page);
4051 : }
4052 :
4053 : /*
4054 : * Compute replica identity tuple before entering the critical section so
4055 : * we don't PANIC upon a memory allocation failure.
4056 : * ExtractReplicaIdentity() will return NULL if nothing needs to be
4057 : * logged. Pass old key required as true only if the replica identity key
4058 : * columns are modified or it has external data.
4059 : */
4060 609088 : old_key_tuple = ExtractReplicaIdentity(relation, &oldtup,
4061 609088 : bms_overlap(modified_attrs, id_attrs) ||
4062 : id_has_external,
4063 : &old_key_copied);
4064 :
4065 : /* NO EREPORT(ERROR) from here till changes are logged */
4066 609088 : START_CRIT_SECTION();
4067 :
4068 : /*
4069 : * If this transaction commits, the old tuple will become DEAD sooner or
4070 : * later. Set flag that this page is a candidate for pruning once our xid
4071 : * falls below the OldestXmin horizon. If the transaction finally aborts,
4072 : * the subsequent page pruning will be a no-op and the hint will be
4073 : * cleared.
4074 : *
4075 : * XXX Should we set hint on newbuf as well? If the transaction aborts,
4076 : * there would be a prunable tuple in the newbuf; but for now we choose
4077 : * not to optimize for aborts. Note that heap_xlog_update must be kept in
4078 : * sync if this decision changes.
4079 : */
4080 609088 : PageSetPrunable(page, xid);
4081 :
4082 609088 : if (use_hot_update)
4083 : {
4084 : /* Mark the old tuple as HOT-updated */
4085 285932 : HeapTupleSetHotUpdated(&oldtup);
4086 : /* And mark the new tuple as heap-only */
4087 285932 : HeapTupleSetHeapOnly(heaptup);
4088 : /* Mark the caller's copy too, in case different from heaptup */
4089 285932 : HeapTupleSetHeapOnly(newtup);
4090 : }
4091 : else
4092 : {
4093 : /* Make sure tuples are correctly marked as not-HOT */
4094 323156 : HeapTupleClearHotUpdated(&oldtup);
4095 323156 : HeapTupleClearHeapOnly(heaptup);
4096 323156 : HeapTupleClearHeapOnly(newtup);
4097 : }
4098 :
4099 609088 : RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
4100 :
4101 :
4102 : /* Clear obsolete visibility flags, possibly set by ourselves above... */
4103 609088 : oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
4104 609088 : oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
4105 : /* ... and store info about transaction updating this tuple */
4106 : Assert(TransactionIdIsValid(xmax_old_tuple));
4107 609088 : HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
4108 609088 : oldtup.t_data->t_infomask |= infomask_old_tuple;
4109 609088 : oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
4110 609088 : HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
4111 :
4112 : /* record address of new tuple in t_ctid of old one */
4113 609088 : oldtup.t_data->t_ctid = heaptup->t_self;
4114 :
4115 : /* clear PD_ALL_VISIBLE flags, reset all visibilitymap bits */
4116 609088 : if (PageIsAllVisible(BufferGetPage(buffer)))
4117 : {
4118 3370 : all_visible_cleared = true;
4119 3370 : PageClearAllVisible(BufferGetPage(buffer));
4120 3370 : visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
4121 : vmbuffer, VISIBILITYMAP_VALID_BITS);
4122 : }
4123 609088 : if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
4124 : {
4125 1694 : all_visible_cleared_new = true;
4126 1694 : PageClearAllVisible(BufferGetPage(newbuf));
4127 1694 : visibilitymap_clear(relation, BufferGetBlockNumber(newbuf),
4128 : vmbuffer_new, VISIBILITYMAP_VALID_BITS);
4129 : }
4130 :
4131 609088 : if (newbuf != buffer)
4132 299338 : MarkBufferDirty(newbuf);
4133 609088 : MarkBufferDirty(buffer);
4134 :
4135 : /* XLOG stuff */
4136 609088 : if (RelationNeedsWAL(relation))
4137 : {
4138 : XLogRecPtr recptr;
4139 :
4140 : /*
4141 : * For logical decoding we need combo CIDs to properly decode the
4142 : * catalog.
4143 : */
4144 586358 : if (RelationIsAccessibleInLogicalDecoding(relation))
4145 : {
4146 5112 : log_heap_new_cid(relation, &oldtup);
4147 5112 : log_heap_new_cid(relation, heaptup);
4148 : }
4149 :
4150 586358 : recptr = log_heap_update(relation, buffer,
4151 : newbuf, &oldtup, heaptup,
4152 : old_key_tuple,
4153 : all_visible_cleared,
4154 : all_visible_cleared_new);
4155 586358 : if (newbuf != buffer)
4156 : {
4157 279092 : PageSetLSN(BufferGetPage(newbuf), recptr);
4158 : }
4159 586358 : PageSetLSN(BufferGetPage(buffer), recptr);
4160 : }
4161 :
4162 609088 : END_CRIT_SECTION();
4163 :
4164 609088 : if (newbuf != buffer)
4165 299338 : LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
4166 609088 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4167 :
4168 : /*
4169 : * Mark old tuple for invalidation from system caches at next command
4170 : * boundary, and mark the new tuple for invalidation in case we abort. We
4171 : * have to do this before releasing the buffer because oldtup is in the
4172 : * buffer. (heaptup is all in local memory, but it's necessary to process
4173 : * both tuple versions in one call to inval.c so we can avoid redundant
4174 : * sinval messages.)
4175 : */
4176 609088 : CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
4177 :
4178 : /* Now we can release the buffer(s) */
4179 609088 : if (newbuf != buffer)
4180 299338 : ReleaseBuffer(newbuf);
4181 609088 : ReleaseBuffer(buffer);
4182 609088 : if (BufferIsValid(vmbuffer_new))
4183 1694 : ReleaseBuffer(vmbuffer_new);
4184 609088 : if (BufferIsValid(vmbuffer))
4185 3370 : ReleaseBuffer(vmbuffer);
4186 :
4187 : /*
4188 : * Release the lmgr tuple lock, if we had it.
4189 : */
4190 609088 : if (have_tuple_lock)
4191 44 : UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
4192 :
4193 609088 : pgstat_count_heap_update(relation, use_hot_update, newbuf != buffer);
4194 :
4195 : /*
4196 : * If heaptup is a private copy, release it. Don't forget to copy t_self
4197 : * back to the caller's image, too.
4198 : */
4199 609088 : if (heaptup != newtup)
4200 : {
4201 3214 : newtup->t_self = heaptup->t_self;
4202 3214 : heap_freetuple(heaptup);
4203 : }
4204 :
4205 : /*
4206 : * If it is a HOT update, the update may still need to update summarized
4207 : * indexes, lest we fail to update those summaries and get incorrect
4208 : * results (for example, minmax bounds of the block may change with this
4209 : * update).
4210 : */
4211 609088 : if (use_hot_update)
4212 : {
4213 285932 : if (summarized_update)
4214 3282 : *update_indexes = TU_Summarizing;
4215 : else
4216 282650 : *update_indexes = TU_None;
4217 : }
4218 : else
4219 323156 : *update_indexes = TU_All;
4220 :
4221 609088 : if (old_key_tuple != NULL && old_key_copied)
4222 168 : heap_freetuple(old_key_tuple);
4223 :
4224 609088 : bms_free(hot_attrs);
4225 609088 : bms_free(sum_attrs);
4226 609088 : bms_free(key_attrs);
4227 609088 : bms_free(id_attrs);
4228 609088 : bms_free(modified_attrs);
4229 609088 : bms_free(interesting_attrs);
4230 :
4231 609088 : return TM_Ok;
4232 : }
4233 :
4234 : #ifdef USE_ASSERT_CHECKING
4235 : /*
4236 : * Confirm adequate lock held during heap_update(), per rules from
4237 : * README.tuplock section "Locking to write inplace-updated tables".
4238 : */
4239 : static void
4240 : check_lock_if_inplace_updateable_rel(Relation relation,
4241 : ItemPointer otid,
4242 : HeapTuple newtup)
4243 : {
4244 : /* LOCKTAG_TUPLE acceptable for any catalog */
4245 : switch (RelationGetRelid(relation))
4246 : {
4247 : case RelationRelationId:
4248 : case DatabaseRelationId:
4249 : {
4250 : LOCKTAG tuptag;
4251 :
4252 : SET_LOCKTAG_TUPLE(tuptag,
4253 : relation->rd_lockInfo.lockRelId.dbId,
4254 : relation->rd_lockInfo.lockRelId.relId,
4255 : ItemPointerGetBlockNumber(otid),
4256 : ItemPointerGetOffsetNumber(otid));
4257 : if (LockHeldByMe(&tuptag, InplaceUpdateTupleLock, false))
4258 : return;
4259 : }
4260 : break;
4261 : default:
4262 : Assert(!IsInplaceUpdateRelation(relation));
4263 : return;
4264 : }
4265 :
4266 : switch (RelationGetRelid(relation))
4267 : {
4268 : case RelationRelationId:
4269 : {
4270 : /* LOCKTAG_TUPLE or LOCKTAG_RELATION ok */
4271 : Form_pg_class classForm = (Form_pg_class) GETSTRUCT(newtup);
4272 : Oid relid = classForm->oid;
4273 : Oid dbid;
4274 : LOCKTAG tag;
4275 :
4276 : if (IsSharedRelation(relid))
4277 : dbid = InvalidOid;
4278 : else
4279 : dbid = MyDatabaseId;
4280 :
4281 : if (classForm->relkind == RELKIND_INDEX)
4282 : {
4283 : Relation irel = index_open(relid, AccessShareLock);
4284 :
4285 : SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
4286 : index_close(irel, AccessShareLock);
4287 : }
4288 : else
4289 : SET_LOCKTAG_RELATION(tag, dbid, relid);
4290 :
4291 : if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, false) &&
4292 : !LockHeldByMe(&tag, ShareRowExclusiveLock, true))
4293 : elog(WARNING,
4294 : "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
4295 : NameStr(classForm->relname),
4296 : relid,
4297 : classForm->relkind,
4298 : ItemPointerGetBlockNumber(otid),
4299 : ItemPointerGetOffsetNumber(otid));
4300 : }
4301 : break;
4302 : case DatabaseRelationId:
4303 : {
4304 : /* LOCKTAG_TUPLE required */
4305 : Form_pg_database dbForm = (Form_pg_database) GETSTRUCT(newtup);
4306 :
4307 : elog(WARNING,
4308 : "missing lock on database \"%s\" (OID %u) @ TID (%u,%u)",
4309 : NameStr(dbForm->datname),
4310 : dbForm->oid,
4311 : ItemPointerGetBlockNumber(otid),
4312 : ItemPointerGetOffsetNumber(otid));
4313 : }
4314 : break;
4315 : }
4316 : }
4317 :
4318 : /*
4319 : * Confirm adequate relation lock held, per rules from README.tuplock section
4320 : * "Locking to write inplace-updated tables".
4321 : */
4322 : static void
4323 : check_inplace_rel_lock(HeapTuple oldtup)
4324 : {
4325 : Form_pg_class classForm = (Form_pg_class) GETSTRUCT(oldtup);
4326 : Oid relid = classForm->oid;
4327 : Oid dbid;
4328 : LOCKTAG tag;
4329 :
4330 : if (IsSharedRelation(relid))
4331 : dbid = InvalidOid;
4332 : else
4333 : dbid = MyDatabaseId;
4334 :
4335 : if (classForm->relkind == RELKIND_INDEX)
4336 : {
4337 : Relation irel = index_open(relid, AccessShareLock);
4338 :
4339 : SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
4340 : index_close(irel, AccessShareLock);
4341 : }
4342 : else
4343 : SET_LOCKTAG_RELATION(tag, dbid, relid);
4344 :
4345 : if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, true))
4346 : elog(WARNING,
4347 : "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
4348 : NameStr(classForm->relname),
4349 : relid,
4350 : classForm->relkind,
4351 : ItemPointerGetBlockNumber(&oldtup->t_self),
4352 : ItemPointerGetOffsetNumber(&oldtup->t_self));
4353 : }
4354 : #endif
4355 :
4356 : /*
4357 : * Check if the specified attribute's values are the same. Subroutine for
4358 : * HeapDetermineColumnsInfo.
4359 : */
4360 : static bool
4361 1473588 : heap_attr_equals(TupleDesc tupdesc, int attrnum, Datum value1, Datum value2,
4362 : bool isnull1, bool isnull2)
4363 : {
4364 : /*
4365 : * If one value is NULL and other is not, then they are certainly not
4366 : * equal
4367 : */
4368 1473588 : if (isnull1 != isnull2)
4369 90 : return false;
4370 :
4371 : /*
4372 : * If both are NULL, they can be considered equal.
4373 : */
4374 1473498 : if (isnull1)
4375 9982 : return true;
4376 :
4377 : /*
4378 : * We do simple binary comparison of the two datums. This may be overly
4379 : * strict because there can be multiple binary representations for the
4380 : * same logical value. But we should be OK as long as there are no false
4381 : * positives. Using a type-specific equality operator is messy because
4382 : * there could be multiple notions of equality in different operator
4383 : * classes; furthermore, we cannot safely invoke user-defined functions
4384 : * while holding exclusive buffer lock.
4385 : */
4386 1463516 : if (attrnum <= 0)
4387 : {
4388 : /* The only allowed system columns are OIDs, so do this */
4389 0 : return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
4390 : }
4391 : else
4392 : {
4393 : CompactAttribute *att;
4394 :
4395 : Assert(attrnum <= tupdesc->natts);
4396 1463516 : att = TupleDescCompactAttr(tupdesc, attrnum - 1);
4397 1463516 : return datumIsEqual(value1, value2, att->attbyval, att->attlen);
4398 : }
4399 : }
4400 :
4401 : /*
4402 : * Check which columns are being updated.
4403 : *
4404 : * Given an updated tuple, determine (and return into the output bitmapset),
4405 : * from those listed as interesting, the set of columns that changed.
4406 : *
4407 : * has_external indicates if any of the unmodified attributes (from those
4408 : * listed as interesting) of the old tuple is a member of external_cols and is
4409 : * stored externally.
4410 : */
4411 : static Bitmapset *
4412 609428 : HeapDetermineColumnsInfo(Relation relation,
4413 : Bitmapset *interesting_cols,
4414 : Bitmapset *external_cols,
4415 : HeapTuple oldtup, HeapTuple newtup,
4416 : bool *has_external)
4417 : {
4418 : int attidx;
4419 609428 : Bitmapset *modified = NULL;
4420 609428 : TupleDesc tupdesc = RelationGetDescr(relation);
4421 :
4422 609428 : attidx = -1;
4423 2083016 : while ((attidx = bms_next_member(interesting_cols, attidx)) >= 0)
4424 : {
4425 : /* attidx is zero-based, attrnum is the normal attribute number */
4426 1473588 : AttrNumber attrnum = attidx + FirstLowInvalidHeapAttributeNumber;
4427 : Datum value1,
4428 : value2;
4429 : bool isnull1,
4430 : isnull2;
4431 :
4432 : /*
4433 : * If it's a whole-tuple reference, say "not equal". It's not really
4434 : * worth supporting this case, since it could only succeed after a
4435 : * no-op update, which is hardly a case worth optimizing for.
4436 : */
4437 1473588 : if (attrnum == 0)
4438 : {
4439 0 : modified = bms_add_member(modified, attidx);
4440 1409250 : continue;
4441 : }
4442 :
4443 : /*
4444 : * Likewise, automatically say "not equal" for any system attribute
4445 : * other than tableOID; we cannot expect these to be consistent in a
4446 : * HOT chain, or even to be set correctly yet in the new tuple.
4447 : */
4448 1473588 : if (attrnum < 0)
4449 : {
4450 0 : if (attrnum != TableOidAttributeNumber)
4451 : {
4452 0 : modified = bms_add_member(modified, attidx);
4453 0 : continue;
4454 : }
4455 : }
4456 :
4457 : /*
4458 : * Extract the corresponding values. XXX this is pretty inefficient
4459 : * if there are many indexed columns. Should we do a single
4460 : * heap_deform_tuple call on each tuple, instead? But that doesn't
4461 : * work for system columns ...
4462 : */
4463 1473588 : value1 = heap_getattr(oldtup, attrnum, tupdesc, &isnull1);
4464 1473588 : value2 = heap_getattr(newtup, attrnum, tupdesc, &isnull2);
4465 :
4466 1473588 : if (!heap_attr_equals(tupdesc, attrnum, value1,
4467 : value2, isnull1, isnull2))
4468 : {
4469 53470 : modified = bms_add_member(modified, attidx);
4470 53470 : continue;
4471 : }
4472 :
4473 : /*
4474 : * No need to check attributes that can't be stored externally. Note
4475 : * that system attributes can't be stored externally.
4476 : */
4477 1420118 : if (attrnum < 0 || isnull1 ||
4478 1410136 : TupleDescCompactAttr(tupdesc, attrnum - 1)->attlen != -1)
4479 1355780 : continue;
4480 :
4481 : /*
4482 : * Check if the old tuple's attribute is stored externally and is a
4483 : * member of external_cols.
4484 : */
4485 64348 : if (VARATT_IS_EXTERNAL((struct varlena *) DatumGetPointer(value1)) &&
4486 10 : bms_is_member(attidx, external_cols))
4487 4 : *has_external = true;
4488 : }
4489 :
4490 609428 : return modified;
4491 : }
4492 :
4493 : /*
4494 : * simple_heap_update - replace a tuple
4495 : *
4496 : * This routine may be used to update a tuple when concurrent updates of
4497 : * the target tuple are not expected (for example, because we have a lock
4498 : * on the relation associated with the tuple). Any failure is reported
4499 : * via ereport().
4500 : */
4501 : void
4502 222186 : simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup,
4503 : TU_UpdateIndexes *update_indexes)
4504 : {
4505 : TM_Result result;
4506 : TM_FailureData tmfd;
4507 : LockTupleMode lockmode;
4508 :
4509 222186 : result = heap_update(relation, otid, tup,
4510 : GetCurrentCommandId(true), InvalidSnapshot,
4511 : true /* wait for commit */ ,
4512 : &tmfd, &lockmode, update_indexes);
4513 222186 : switch (result)
4514 : {
4515 0 : case TM_SelfModified:
4516 : /* Tuple was already updated in current command? */
4517 0 : elog(ERROR, "tuple already updated by self");
4518 : break;
4519 :
4520 222184 : case TM_Ok:
4521 : /* done successfully */
4522 222184 : break;
4523 :
4524 0 : case TM_Updated:
4525 0 : elog(ERROR, "tuple concurrently updated");
4526 : break;
4527 :
4528 2 : case TM_Deleted:
4529 2 : elog(ERROR, "tuple concurrently deleted");
4530 : break;
4531 :
4532 0 : default:
4533 0 : elog(ERROR, "unrecognized heap_update status: %u", result);
4534 : break;
4535 : }
4536 222184 : }
4537 :
4538 :
4539 : /*
4540 : * Return the MultiXactStatus corresponding to the given tuple lock mode.
4541 : */
4542 : static MultiXactStatus
4543 2390 : get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
4544 : {
4545 : int retval;
4546 :
4547 2390 : if (is_update)
4548 192 : retval = tupleLockExtraInfo[mode].updstatus;
4549 : else
4550 2198 : retval = tupleLockExtraInfo[mode].lockstatus;
4551 :
4552 2390 : if (retval == -1)
4553 0 : elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4554 : is_update ? "true" : "false");
4555 :
4556 2390 : return (MultiXactStatus) retval;
4557 : }
4558 :
4559 : /*
4560 : * heap_lock_tuple - lock a tuple in shared or exclusive mode
4561 : *
4562 : * Note that this acquires a buffer pin, which the caller must release.
4563 : *
4564 : * Input parameters:
4565 : * relation: relation containing tuple (caller must hold suitable lock)
4566 : * cid: current command ID (used for visibility test, and stored into
4567 : * tuple's cmax if lock is successful)
4568 : * mode: indicates if shared or exclusive tuple lock is desired
4569 : * wait_policy: what to do if tuple lock is not available
4570 : * follow_updates: if true, follow the update chain to also lock descendant
4571 : * tuples.
4572 : *
4573 : * Output parameters:
4574 : * *tuple: all fields filled in
4575 : * *buffer: set to buffer holding tuple (pinned but not locked at exit)
4576 : * *tmfd: filled in failure cases (see below)
4577 : *
4578 : * Function results are the same as the ones for table_tuple_lock().
4579 : *
4580 : * In the failure cases other than TM_Invisible, the routine fills
4581 : * *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
4582 : * if necessary), and t_cmax (the last only for TM_SelfModified,
4583 : * since we cannot obtain cmax from a combo CID generated by another
4584 : * transaction).
4585 : * See comments for struct TM_FailureData for additional info.
4586 : *
4587 : * See README.tuplock for a thorough explanation of this mechanism.
4588 : */
4589 : TM_Result
4590 169906 : heap_lock_tuple(Relation relation, HeapTuple tuple,
4591 : CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
4592 : bool follow_updates,
4593 : Buffer *buffer, TM_FailureData *tmfd)
4594 : {
4595 : TM_Result result;
4596 169906 : ItemPointer tid = &(tuple->t_self);
4597 : ItemId lp;
4598 : Page page;
4599 169906 : Buffer vmbuffer = InvalidBuffer;
4600 : BlockNumber block;
4601 : TransactionId xid,
4602 : xmax;
4603 : uint16 old_infomask,
4604 : new_infomask,
4605 : new_infomask2;
4606 169906 : bool first_time = true;
4607 169906 : bool skip_tuple_lock = false;
4608 169906 : bool have_tuple_lock = false;
4609 169906 : bool cleared_all_frozen = false;
4610 :
4611 169906 : *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
4612 169906 : block = ItemPointerGetBlockNumber(tid);
4613 :
4614 : /*
4615 : * Before locking the buffer, pin the visibility map page if it appears to
4616 : * be necessary. Since we haven't got the lock yet, someone else might be
4617 : * in the middle of changing this, so we'll need to recheck after we have
4618 : * the lock.
4619 : */
4620 169906 : if (PageIsAllVisible(BufferGetPage(*buffer)))
4621 3334 : visibilitymap_pin(relation, block, &vmbuffer);
4622 :
4623 169906 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4624 :
4625 169906 : page = BufferGetPage(*buffer);
4626 169906 : lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4627 : Assert(ItemIdIsNormal(lp));
4628 :
4629 169906 : tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4630 169906 : tuple->t_len = ItemIdGetLength(lp);
4631 169906 : tuple->t_tableOid = RelationGetRelid(relation);
4632 :
4633 28 : l3:
4634 169934 : result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4635 :
4636 169934 : if (result == TM_Invisible)
4637 : {
4638 : /*
4639 : * This is possible, but only when locking a tuple for ON CONFLICT
4640 : * UPDATE. We return this value here rather than throwing an error in
4641 : * order to give that case the opportunity to throw a more specific
4642 : * error.
4643 : */
4644 24 : result = TM_Invisible;
4645 24 : goto out_locked;
4646 : }
4647 169910 : else if (result == TM_BeingModified ||
4648 154136 : result == TM_Updated ||
4649 : result == TM_Deleted)
4650 : {
4651 : TransactionId xwait;
4652 : uint16 infomask;
4653 : uint16 infomask2;
4654 : bool require_sleep;
4655 : ItemPointerData t_ctid;
4656 :
4657 : /* must copy state data before unlocking buffer */
4658 15776 : xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4659 15776 : infomask = tuple->t_data->t_infomask;
4660 15776 : infomask2 = tuple->t_data->t_infomask2;
4661 15776 : ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4662 :
4663 15776 : LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4664 :
4665 : /*
4666 : * If any subtransaction of the current top transaction already holds
4667 : * a lock as strong as or stronger than what we're requesting, we
4668 : * effectively hold the desired lock already. We *must* succeed
4669 : * without trying to take the tuple lock, else we will deadlock
4670 : * against anyone wanting to acquire a stronger lock.
4671 : *
4672 : * Note we only do this the first time we loop on the HTSU result;
4673 : * there is no point in testing in subsequent passes, because
4674 : * evidently our own transaction cannot have acquired a new lock after
4675 : * the first time we checked.
4676 : */
4677 15776 : if (first_time)
4678 : {
4679 15758 : first_time = false;
4680 :
4681 15758 : if (infomask & HEAP_XMAX_IS_MULTI)
4682 : {
4683 : int i;
4684 : int nmembers;
4685 : MultiXactMember *members;
4686 :
4687 : /*
4688 : * We don't need to allow old multixacts here; if that had
4689 : * been the case, HeapTupleSatisfiesUpdate would have returned
4690 : * MayBeUpdated and we wouldn't be here.
4691 : */
4692 : nmembers =
4693 156 : GetMultiXactIdMembers(xwait, &members, false,
4694 156 : HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4695 :
4696 466 : for (i = 0; i < nmembers; i++)
4697 : {
4698 : /* only consider members of our own transaction */
4699 338 : if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4700 240 : continue;
4701 :
4702 98 : if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4703 : {
4704 28 : pfree(members);
4705 28 : result = TM_Ok;
4706 28 : goto out_unlocked;
4707 : }
4708 : else
4709 : {
4710 : /*
4711 : * Disable acquisition of the heavyweight tuple lock.
4712 : * Otherwise, when promoting a weaker lock, we might
4713 : * deadlock with another locker that has acquired the
4714 : * heavyweight tuple lock and is waiting for our
4715 : * transaction to finish.
4716 : *
4717 : * Note that in this case we still need to wait for
4718 : * the multixact if required, to avoid acquiring
4719 : * conflicting locks.
4720 : */
4721 70 : skip_tuple_lock = true;
4722 : }
4723 : }
4724 :
4725 128 : if (members)
4726 128 : pfree(members);
4727 : }
4728 15602 : else if (TransactionIdIsCurrentTransactionId(xwait))
4729 : {
4730 13104 : switch (mode)
4731 : {
4732 332 : case LockTupleKeyShare:
4733 : Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
4734 : HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4735 : HEAP_XMAX_IS_EXCL_LOCKED(infomask));
4736 332 : result = TM_Ok;
4737 332 : goto out_unlocked;
4738 38 : case LockTupleShare:
4739 50 : if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4740 12 : HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4741 : {
4742 26 : result = TM_Ok;
4743 26 : goto out_unlocked;
4744 : }
4745 12 : break;
4746 130 : case LockTupleNoKeyExclusive:
4747 130 : if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4748 : {
4749 106 : result = TM_Ok;
4750 106 : goto out_unlocked;
4751 : }
4752 24 : break;
4753 12604 : case LockTupleExclusive:
4754 12604 : if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
4755 2524 : infomask2 & HEAP_KEYS_UPDATED)
4756 : {
4757 2482 : result = TM_Ok;
4758 2482 : goto out_unlocked;
4759 : }
4760 10122 : break;
4761 : }
4762 : }
4763 : }
4764 :
4765 : /*
4766 : * Initially assume that we will have to wait for the locking
4767 : * transaction(s) to finish. We check various cases below in which
4768 : * this can be turned off.
4769 : */
4770 12802 : require_sleep = true;
4771 12802 : if (mode == LockTupleKeyShare)
4772 : {
4773 : /*
4774 : * If we're requesting KeyShare, and there's no update present, we
4775 : * don't need to wait. Even if there is an update, we can still
4776 : * continue if the key hasn't been modified.
4777 : *
4778 : * However, if there are updates, we need to walk the update chain
4779 : * to mark future versions of the row as locked, too. That way,
4780 : * if somebody deletes that future version, we're protected
4781 : * against the key going away. This locking of future versions
4782 : * could block momentarily, if a concurrent transaction is
4783 : * deleting a key; or it could return a value to the effect that
4784 : * the transaction deleting the key has already committed. So we
4785 : * do this before re-locking the buffer; otherwise this would be
4786 : * prone to deadlocks.
4787 : *
4788 : * Note that the TID we're locking was grabbed before we unlocked
4789 : * the buffer. For it to change while we're not looking, the
4790 : * other properties we're testing for below after re-locking the
4791 : * buffer would also change, in which case we would restart this
4792 : * loop above.
4793 : */
4794 1170 : if (!(infomask2 & HEAP_KEYS_UPDATED))
4795 : {
4796 : bool updated;
4797 :
4798 1084 : updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4799 :
4800 : /*
4801 : * If there are updates, follow the update chain; bail out if
4802 : * that cannot be done.
4803 : */
4804 1084 : if (follow_updates && updated)
4805 : {
4806 : TM_Result res;
4807 :
4808 100 : res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4809 : GetCurrentTransactionId(),
4810 : mode);
4811 100 : if (res != TM_Ok)
4812 : {
4813 12 : result = res;
4814 : /* recovery code expects to have buffer lock held */
4815 12 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4816 380 : goto failed;
4817 : }
4818 : }
4819 :
4820 1072 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4821 :
4822 : /*
4823 : * Make sure it's still an appropriate lock, else start over.
4824 : * Also, if it wasn't updated before we released the lock, but
4825 : * is updated now, we start over too; the reason is that we
4826 : * now need to follow the update chain to lock the new
4827 : * versions.
4828 : */
4829 1072 : if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4830 86 : ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4831 86 : !updated))
4832 28 : goto l3;
4833 :
4834 : /* Things look okay, so we can skip sleeping */
4835 1072 : require_sleep = false;
4836 :
4837 : /*
4838 : * Note we allow Xmax to change here; other updaters/lockers
4839 : * could have modified it before we grabbed the buffer lock.
4840 : * However, this is not a problem, because with the recheck we
4841 : * just did we ensure that they still don't conflict with the
4842 : * lock we want.
4843 : */
4844 : }
4845 : }
4846 11632 : else if (mode == LockTupleShare)
4847 : {
4848 : /*
4849 : * If we're requesting Share, we can similarly avoid sleeping if
4850 : * there's no update and no exclusive lock present.
4851 : */
4852 886 : if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4853 886 : !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4854 : {
4855 874 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4856 :
4857 : /*
4858 : * Make sure it's still an appropriate lock, else start over.
4859 : * See above about allowing xmax to change.
4860 : */
4861 1748 : if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4862 874 : HEAP_XMAX_IS_EXCL_LOCKED(tuple->t_data->t_infomask))
4863 0 : goto l3;
4864 874 : require_sleep = false;
4865 : }
4866 : }
4867 10746 : else if (mode == LockTupleNoKeyExclusive)
4868 : {
4869 : /*
4870 : * If we're requesting NoKeyExclusive, we might also be able to
4871 : * avoid sleeping; just ensure that there no conflicting lock
4872 : * already acquired.
4873 : */
4874 326 : if (infomask & HEAP_XMAX_IS_MULTI)
4875 : {
4876 52 : if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
4877 : mode, NULL))
4878 : {
4879 : /*
4880 : * No conflict, but if the xmax changed under us in the
4881 : * meantime, start over.
4882 : */
4883 26 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4884 52 : if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4885 26 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4886 : xwait))
4887 0 : goto l3;
4888 :
4889 : /* otherwise, we're good */
4890 26 : require_sleep = false;
4891 : }
4892 : }
4893 274 : else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4894 : {
4895 36 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4896 :
4897 : /* if the xmax changed in the meantime, start over */
4898 72 : if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4899 36 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4900 : xwait))
4901 0 : goto l3;
4902 : /* otherwise, we're good */
4903 36 : require_sleep = false;
4904 : }
4905 : }
4906 :
4907 : /*
4908 : * As a check independent from those above, we can also avoid sleeping
4909 : * if the current transaction is the sole locker of the tuple. Note
4910 : * that the strength of the lock already held is irrelevant; this is
4911 : * not about recording the lock in Xmax (which will be done regardless
4912 : * of this optimization, below). Also, note that the cases where we
4913 : * hold a lock stronger than we are requesting are already handled
4914 : * above by not doing anything.
4915 : *
4916 : * Note we only deal with the non-multixact case here; MultiXactIdWait
4917 : * is well equipped to deal with this situation on its own.
4918 : */
4919 23490 : if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
4920 10700 : TransactionIdIsCurrentTransactionId(xwait))
4921 : {
4922 : /* ... but if the xmax changed in the meantime, start over */
4923 10122 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4924 20244 : if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4925 10122 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4926 : xwait))
4927 0 : goto l3;
4928 : Assert(HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask));
4929 10122 : require_sleep = false;
4930 : }
4931 :
4932 : /*
4933 : * Time to sleep on the other transaction/multixact, if necessary.
4934 : *
4935 : * If the other transaction is an update/delete that's already
4936 : * committed, then sleeping cannot possibly do any good: if we're
4937 : * required to sleep, get out to raise an error instead.
4938 : *
4939 : * By here, we either have already acquired the buffer exclusive lock,
4940 : * or we must wait for the locking transaction or multixact; so below
4941 : * we ensure that we grab buffer lock after the sleep.
4942 : */
4943 12790 : if (require_sleep && (result == TM_Updated || result == TM_Deleted))
4944 : {
4945 292 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4946 292 : goto failed;
4947 : }
4948 12498 : else if (require_sleep)
4949 : {
4950 : /*
4951 : * Acquire tuple lock to establish our priority for the tuple, or
4952 : * die trying. LockTuple will release us when we are next-in-line
4953 : * for the tuple. We must do this even if we are share-locking,
4954 : * but not if we already have a weaker lock on the tuple.
4955 : *
4956 : * If we are forced to "start over" below, we keep the tuple lock;
4957 : * this arranges that we stay at the head of the line while
4958 : * rechecking tuple state.
4959 : */
4960 368 : if (!skip_tuple_lock &&
4961 336 : !heap_acquire_tuplock(relation, tid, mode, wait_policy,
4962 : &have_tuple_lock))
4963 : {
4964 : /*
4965 : * This can only happen if wait_policy is Skip and the lock
4966 : * couldn't be obtained.
4967 : */
4968 2 : result = TM_WouldBlock;
4969 : /* recovery code expects to have buffer lock held */
4970 2 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4971 2 : goto failed;
4972 : }
4973 :
4974 364 : if (infomask & HEAP_XMAX_IS_MULTI)
4975 : {
4976 80 : MultiXactStatus status = get_mxact_status_for_lock(mode, false);
4977 :
4978 : /* We only ever lock tuples, never update them */
4979 80 : if (status >= MultiXactStatusNoKeyUpdate)
4980 0 : elog(ERROR, "invalid lock mode in heap_lock_tuple");
4981 :
4982 : /* wait for multixact to end, or die trying */
4983 80 : switch (wait_policy)
4984 : {
4985 72 : case LockWaitBlock:
4986 72 : MultiXactIdWait((MultiXactId) xwait, status, infomask,
4987 : relation, &tuple->t_self, XLTW_Lock, NULL);
4988 72 : break;
4989 4 : case LockWaitSkip:
4990 4 : if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
4991 : status, infomask, relation,
4992 : NULL, false))
4993 : {
4994 4 : result = TM_WouldBlock;
4995 : /* recovery code expects to have buffer lock held */
4996 4 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4997 4 : goto failed;
4998 : }
4999 0 : break;
5000 4 : case LockWaitError:
5001 4 : if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
5002 : status, infomask, relation,
5003 : NULL, log_lock_failures))
5004 4 : ereport(ERROR,
5005 : (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5006 : errmsg("could not obtain lock on row in relation \"%s\"",
5007 : RelationGetRelationName(relation))));
5008 :
5009 0 : break;
5010 : }
5011 :
5012 : /*
5013 : * Of course, the multixact might not be done here: if we're
5014 : * requesting a light lock mode, other transactions with light
5015 : * locks could still be alive, as well as locks owned by our
5016 : * own xact or other subxacts of this backend. We need to
5017 : * preserve the surviving MultiXact members. Note that it
5018 : * isn't absolutely necessary in the latter case, but doing so
5019 : * is simpler.
5020 : */
5021 : }
5022 : else
5023 : {
5024 : /* wait for regular transaction to end, or die trying */
5025 284 : switch (wait_policy)
5026 : {
5027 206 : case LockWaitBlock:
5028 206 : XactLockTableWait(xwait, relation, &tuple->t_self,
5029 : XLTW_Lock);
5030 206 : break;
5031 66 : case LockWaitSkip:
5032 66 : if (!ConditionalXactLockTableWait(xwait, false))
5033 : {
5034 66 : result = TM_WouldBlock;
5035 : /* recovery code expects to have buffer lock held */
5036 66 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5037 66 : goto failed;
5038 : }
5039 0 : break;
5040 12 : case LockWaitError:
5041 12 : if (!ConditionalXactLockTableWait(xwait, log_lock_failures))
5042 12 : ereport(ERROR,
5043 : (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5044 : errmsg("could not obtain lock on row in relation \"%s\"",
5045 : RelationGetRelationName(relation))));
5046 0 : break;
5047 : }
5048 : }
5049 :
5050 : /* if there are updates, follow the update chain */
5051 278 : if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
5052 : {
5053 : TM_Result res;
5054 :
5055 106 : res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
5056 : GetCurrentTransactionId(),
5057 : mode);
5058 106 : if (res != TM_Ok)
5059 : {
5060 4 : result = res;
5061 : /* recovery code expects to have buffer lock held */
5062 4 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5063 4 : goto failed;
5064 : }
5065 : }
5066 :
5067 274 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5068 :
5069 : /*
5070 : * xwait is done, but if xwait had just locked the tuple then some
5071 : * other xact could update this tuple before we get to this point.
5072 : * Check for xmax change, and start over if so.
5073 : */
5074 524 : if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
5075 250 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
5076 : xwait))
5077 28 : goto l3;
5078 :
5079 246 : if (!(infomask & HEAP_XMAX_IS_MULTI))
5080 : {
5081 : /*
5082 : * Otherwise check if it committed or aborted. Note we cannot
5083 : * be here if the tuple was only locked by somebody who didn't
5084 : * conflict with us; that would have been handled above. So
5085 : * that transaction must necessarily be gone by now. But
5086 : * don't check for this in the multixact case, because some
5087 : * locker transactions might still be running.
5088 : */
5089 184 : UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
5090 : }
5091 : }
5092 :
5093 : /* By here, we're certain that we hold buffer exclusive lock again */
5094 :
5095 : /*
5096 : * We may lock if previous xmax aborted, or if it committed but only
5097 : * locked the tuple without updating it; or if we didn't have to wait
5098 : * at all for whatever reason.
5099 : */
5100 12376 : if (!require_sleep ||
5101 432 : (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
5102 342 : HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
5103 156 : HeapTupleHeaderIsOnlyLocked(tuple->t_data))
5104 12232 : result = TM_Ok;
5105 144 : else if (!ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
5106 108 : result = TM_Updated;
5107 : else
5108 36 : result = TM_Deleted;
5109 : }
5110 :
5111 154134 : failed:
5112 166890 : if (result != TM_Ok)
5113 : {
5114 : Assert(result == TM_SelfModified || result == TM_Updated ||
5115 : result == TM_Deleted || result == TM_WouldBlock);
5116 :
5117 : /*
5118 : * When locking a tuple under LockWaitSkip semantics and we fail with
5119 : * TM_WouldBlock above, it's possible for concurrent transactions to
5120 : * release the lock and set HEAP_XMAX_INVALID in the meantime. So
5121 : * this assert is slightly different from the equivalent one in
5122 : * heap_delete and heap_update.
5123 : */
5124 : Assert((result == TM_WouldBlock) ||
5125 : !(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
5126 : Assert(result != TM_Updated ||
5127 : !ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid));
5128 536 : tmfd->ctid = tuple->t_data->t_ctid;
5129 536 : tmfd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
5130 536 : if (result == TM_SelfModified)
5131 12 : tmfd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
5132 : else
5133 524 : tmfd->cmax = InvalidCommandId;
5134 536 : goto out_locked;
5135 : }
5136 :
5137 : /*
5138 : * If we didn't pin the visibility map page and the page has become all
5139 : * visible while we were busy locking the buffer, or during some
5140 : * subsequent window during which we had it unlocked, we'll have to unlock
5141 : * and re-lock, to avoid holding the buffer lock across I/O. That's a bit
5142 : * unfortunate, especially since we'll now have to recheck whether the
5143 : * tuple has been locked or updated under us, but hopefully it won't
5144 : * happen very often.
5145 : */
5146 166354 : if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
5147 : {
5148 0 : LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
5149 0 : visibilitymap_pin(relation, block, &vmbuffer);
5150 0 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5151 0 : goto l3;
5152 : }
5153 :
5154 166354 : xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
5155 166354 : old_infomask = tuple->t_data->t_infomask;
5156 :
5157 : /*
5158 : * If this is the first possibly-multixact-able operation in the current
5159 : * transaction, set my per-backend OldestMemberMXactId setting. We can be
5160 : * certain that the transaction will never become a member of any older
5161 : * MultiXactIds than that. (We have to do this even if we end up just
5162 : * using our own TransactionId below, since some other backend could
5163 : * incorporate our XID into a MultiXact immediately afterwards.)
5164 : */
5165 166354 : MultiXactIdSetOldestMember();
5166 :
5167 : /*
5168 : * Compute the new xmax and infomask to store into the tuple. Note we do
5169 : * not modify the tuple just yet, because that would leave it in the wrong
5170 : * state if multixact.c elogs.
5171 : */
5172 166354 : compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
5173 : GetCurrentTransactionId(), mode, false,
5174 : &xid, &new_infomask, &new_infomask2);
5175 :
5176 166354 : START_CRIT_SECTION();
5177 :
5178 : /*
5179 : * Store transaction information of xact locking the tuple.
5180 : *
5181 : * Note: Cmax is meaningless in this context, so don't set it; this avoids
5182 : * possibly generating a useless combo CID. Moreover, if we're locking a
5183 : * previously updated tuple, it's important to preserve the Cmax.
5184 : *
5185 : * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
5186 : * we would break the HOT chain.
5187 : */
5188 166354 : tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
5189 166354 : tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5190 166354 : tuple->t_data->t_infomask |= new_infomask;
5191 166354 : tuple->t_data->t_infomask2 |= new_infomask2;
5192 166354 : if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5193 166276 : HeapTupleHeaderClearHotUpdated(tuple->t_data);
5194 166354 : HeapTupleHeaderSetXmax(tuple->t_data, xid);
5195 :
5196 : /*
5197 : * Make sure there is no forward chain link in t_ctid. Note that in the
5198 : * cases where the tuple has been updated, we must not overwrite t_ctid,
5199 : * because it was set by the updater. Moreover, if the tuple has been
5200 : * updated, we need to follow the update chain to lock the new versions of
5201 : * the tuple as well.
5202 : */
5203 166354 : if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5204 166276 : tuple->t_data->t_ctid = *tid;
5205 :
5206 : /* Clear only the all-frozen bit on visibility map if needed */
5207 169688 : if (PageIsAllVisible(page) &&
5208 3334 : visibilitymap_clear(relation, block, vmbuffer,
5209 : VISIBILITYMAP_ALL_FROZEN))
5210 30 : cleared_all_frozen = true;
5211 :
5212 :
5213 166354 : MarkBufferDirty(*buffer);
5214 :
5215 : /*
5216 : * XLOG stuff. You might think that we don't need an XLOG record because
5217 : * there is no state change worth restoring after a crash. You would be
5218 : * wrong however: we have just written either a TransactionId or a
5219 : * MultiXactId that may never have been seen on disk before, and we need
5220 : * to make sure that there are XLOG entries covering those ID numbers.
5221 : * Else the same IDs might be re-used after a crash, which would be
5222 : * disastrous if this page made it to disk before the crash. Essentially
5223 : * we have to enforce the WAL log-before-data rule even in this case.
5224 : * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
5225 : * entries for everything anyway.)
5226 : */
5227 166354 : if (RelationNeedsWAL(relation))
5228 : {
5229 : xl_heap_lock xlrec;
5230 : XLogRecPtr recptr;
5231 :
5232 165666 : XLogBeginInsert();
5233 165666 : XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD);
5234 :
5235 165666 : xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
5236 165666 : xlrec.xmax = xid;
5237 331332 : xlrec.infobits_set = compute_infobits(new_infomask,
5238 165666 : tuple->t_data->t_infomask2);
5239 165666 : xlrec.flags = cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
5240 165666 : XLogRegisterData(&xlrec, SizeOfHeapLock);
5241 :
5242 : /* we don't decode row locks atm, so no need to log the origin */
5243 :
5244 165666 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
5245 :
5246 165666 : PageSetLSN(page, recptr);
5247 : }
5248 :
5249 166354 : END_CRIT_SECTION();
5250 :
5251 166354 : result = TM_Ok;
5252 :
5253 166914 : out_locked:
5254 166914 : LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
5255 :
5256 169888 : out_unlocked:
5257 169888 : if (BufferIsValid(vmbuffer))
5258 3334 : ReleaseBuffer(vmbuffer);
5259 :
5260 : /*
5261 : * Don't update the visibility map here. Locking a tuple doesn't change
5262 : * visibility info.
5263 : */
5264 :
5265 : /*
5266 : * Now that we have successfully marked the tuple as locked, we can
5267 : * release the lmgr tuple lock, if we had it.
5268 : */
5269 169888 : if (have_tuple_lock)
5270 306 : UnlockTupleTuplock(relation, tid, mode);
5271 :
5272 169888 : return result;
5273 : }
5274 :
5275 : /*
5276 : * Acquire heavyweight lock on the given tuple, in preparation for acquiring
5277 : * its normal, Xmax-based tuple lock.
5278 : *
5279 : * have_tuple_lock is an input and output parameter: on input, it indicates
5280 : * whether the lock has previously been acquired (and this function does
5281 : * nothing in that case). If this function returns success, have_tuple_lock
5282 : * has been flipped to true.
5283 : *
5284 : * Returns false if it was unable to obtain the lock; this can only happen if
5285 : * wait_policy is Skip.
5286 : */
5287 : static bool
5288 602 : heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
5289 : LockWaitPolicy wait_policy, bool *have_tuple_lock)
5290 : {
5291 602 : if (*have_tuple_lock)
5292 18 : return true;
5293 :
5294 584 : switch (wait_policy)
5295 : {
5296 502 : case LockWaitBlock:
5297 502 : LockTupleTuplock(relation, tid, mode);
5298 502 : break;
5299 :
5300 68 : case LockWaitSkip:
5301 68 : if (!ConditionalLockTupleTuplock(relation, tid, mode, false))
5302 2 : return false;
5303 66 : break;
5304 :
5305 14 : case LockWaitError:
5306 14 : if (!ConditionalLockTupleTuplock(relation, tid, mode, log_lock_failures))
5307 2 : ereport(ERROR,
5308 : (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5309 : errmsg("could not obtain lock on row in relation \"%s\"",
5310 : RelationGetRelationName(relation))));
5311 12 : break;
5312 : }
5313 580 : *have_tuple_lock = true;
5314 :
5315 580 : return true;
5316 : }
5317 :
5318 : /*
5319 : * Given an original set of Xmax and infomask, and a transaction (identified by
5320 : * add_to_xmax) acquiring a new lock of some mode, compute the new Xmax and
5321 : * corresponding infomasks to use on the tuple.
5322 : *
5323 : * Note that this might have side effects such as creating a new MultiXactId.
5324 : *
5325 : * Most callers will have called HeapTupleSatisfiesUpdate before this function;
5326 : * that will have set the HEAP_XMAX_INVALID bit if the xmax was a MultiXactId
5327 : * but it was not running anymore. There is a race condition, which is that the
5328 : * MultiXactId may have finished since then, but that uncommon case is handled
5329 : * either here, or within MultiXactIdExpand.
5330 : *
5331 : * There is a similar race condition possible when the old xmax was a regular
5332 : * TransactionId. We test TransactionIdIsInProgress again just to narrow the
5333 : * window, but it's still possible to end up creating an unnecessary
5334 : * MultiXactId. Fortunately this is harmless.
5335 : */
5336 : static void
5337 4064498 : compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
5338 : uint16 old_infomask2, TransactionId add_to_xmax,
5339 : LockTupleMode mode, bool is_update,
5340 : TransactionId *result_xmax, uint16 *result_infomask,
5341 : uint16 *result_infomask2)
5342 : {
5343 : TransactionId new_xmax;
5344 : uint16 new_infomask,
5345 : new_infomask2;
5346 :
5347 : Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
5348 :
5349 207902 : l5:
5350 4272400 : new_infomask = 0;
5351 4272400 : new_infomask2 = 0;
5352 4272400 : if (old_infomask & HEAP_XMAX_INVALID)
5353 : {
5354 : /*
5355 : * No previous locker; we just insert our own TransactionId.
5356 : *
5357 : * Note that it's critical that this case be the first one checked,
5358 : * because there are several blocks below that come back to this one
5359 : * to implement certain optimizations; old_infomask might contain
5360 : * other dirty bits in those cases, but we don't really care.
5361 : */
5362 4062252 : if (is_update)
5363 : {
5364 3597368 : new_xmax = add_to_xmax;
5365 3597368 : if (mode == LockTupleExclusive)
5366 3060812 : new_infomask2 |= HEAP_KEYS_UPDATED;
5367 : }
5368 : else
5369 : {
5370 464884 : new_infomask |= HEAP_XMAX_LOCK_ONLY;
5371 464884 : switch (mode)
5372 : {
5373 5144 : case LockTupleKeyShare:
5374 5144 : new_xmax = add_to_xmax;
5375 5144 : new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
5376 5144 : break;
5377 1472 : case LockTupleShare:
5378 1472 : new_xmax = add_to_xmax;
5379 1472 : new_infomask |= HEAP_XMAX_SHR_LOCK;
5380 1472 : break;
5381 266936 : case LockTupleNoKeyExclusive:
5382 266936 : new_xmax = add_to_xmax;
5383 266936 : new_infomask |= HEAP_XMAX_EXCL_LOCK;
5384 266936 : break;
5385 191332 : case LockTupleExclusive:
5386 191332 : new_xmax = add_to_xmax;
5387 191332 : new_infomask |= HEAP_XMAX_EXCL_LOCK;
5388 191332 : new_infomask2 |= HEAP_KEYS_UPDATED;
5389 191332 : break;
5390 0 : default:
5391 0 : new_xmax = InvalidTransactionId; /* silence compiler */
5392 0 : elog(ERROR, "invalid lock mode");
5393 : }
5394 : }
5395 : }
5396 210148 : else if (old_infomask & HEAP_XMAX_IS_MULTI)
5397 : {
5398 : MultiXactStatus new_status;
5399 :
5400 : /*
5401 : * Currently we don't allow XMAX_COMMITTED to be set for multis, so
5402 : * cross-check.
5403 : */
5404 : Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
5405 :
5406 : /*
5407 : * A multixact together with LOCK_ONLY set but neither lock bit set
5408 : * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
5409 : * anymore. This check is critical for databases upgraded by
5410 : * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
5411 : * that such multis are never passed.
5412 : */
5413 230 : if (HEAP_LOCKED_UPGRADED(old_infomask))
5414 : {
5415 0 : old_infomask &= ~HEAP_XMAX_IS_MULTI;
5416 0 : old_infomask |= HEAP_XMAX_INVALID;
5417 0 : goto l5;
5418 : }
5419 :
5420 : /*
5421 : * If the XMAX is already a MultiXactId, then we need to expand it to
5422 : * include add_to_xmax; but if all the members were lockers and are
5423 : * all gone, we can do away with the IS_MULTI bit and just set
5424 : * add_to_xmax as the only locker/updater. If all lockers are gone
5425 : * and we have an updater that aborted, we can also do without a
5426 : * multi.
5427 : *
5428 : * The cost of doing GetMultiXactIdMembers would be paid by
5429 : * MultiXactIdExpand if we weren't to do this, so this check is not
5430 : * incurring extra work anyhow.
5431 : */
5432 230 : if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
5433 : {
5434 46 : if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
5435 16 : !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
5436 : old_infomask)))
5437 : {
5438 : /*
5439 : * Reset these bits and restart; otherwise fall through to
5440 : * create a new multi below.
5441 : */
5442 46 : old_infomask &= ~HEAP_XMAX_IS_MULTI;
5443 46 : old_infomask |= HEAP_XMAX_INVALID;
5444 46 : goto l5;
5445 : }
5446 : }
5447 :
5448 184 : new_status = get_mxact_status_for_lock(mode, is_update);
5449 :
5450 184 : new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
5451 : new_status);
5452 184 : GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5453 : }
5454 209918 : else if (old_infomask & HEAP_XMAX_COMMITTED)
5455 : {
5456 : /*
5457 : * It's a committed update, so we need to preserve him as updater of
5458 : * the tuple.
5459 : */
5460 : MultiXactStatus status;
5461 : MultiXactStatus new_status;
5462 :
5463 26 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5464 0 : status = MultiXactStatusUpdate;
5465 : else
5466 26 : status = MultiXactStatusNoKeyUpdate;
5467 :
5468 26 : new_status = get_mxact_status_for_lock(mode, is_update);
5469 :
5470 : /*
5471 : * since it's not running, it's obviously impossible for the old
5472 : * updater to be identical to the current one, so we need not check
5473 : * for that case as we do in the block above.
5474 : */
5475 26 : new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5476 26 : GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5477 : }
5478 209892 : else if (TransactionIdIsInProgress(xmax))
5479 : {
5480 : /*
5481 : * If the XMAX is a valid, in-progress TransactionId, then we need to
5482 : * create a new MultiXactId that includes both the old locker or
5483 : * updater and our own TransactionId.
5484 : */
5485 : MultiXactStatus new_status;
5486 : MultiXactStatus old_status;
5487 : LockTupleMode old_mode;
5488 :
5489 209874 : if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5490 : {
5491 209822 : if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5492 11236 : old_status = MultiXactStatusForKeyShare;
5493 198586 : else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5494 866 : old_status = MultiXactStatusForShare;
5495 197720 : else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5496 : {
5497 197720 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5498 185448 : old_status = MultiXactStatusForUpdate;
5499 : else
5500 12272 : old_status = MultiXactStatusForNoKeyUpdate;
5501 : }
5502 : else
5503 : {
5504 : /*
5505 : * LOCK_ONLY can be present alone only when a page has been
5506 : * upgraded by pg_upgrade. But in that case,
5507 : * TransactionIdIsInProgress() should have returned false. We
5508 : * assume it's no longer locked in this case.
5509 : */
5510 0 : elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
5511 0 : old_infomask |= HEAP_XMAX_INVALID;
5512 0 : old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
5513 0 : goto l5;
5514 : }
5515 : }
5516 : else
5517 : {
5518 : /* it's an update, but which kind? */
5519 52 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5520 0 : old_status = MultiXactStatusUpdate;
5521 : else
5522 52 : old_status = MultiXactStatusNoKeyUpdate;
5523 : }
5524 :
5525 209874 : old_mode = TUPLOCK_from_mxstatus(old_status);
5526 :
5527 : /*
5528 : * If the lock to be acquired is for the same TransactionId as the
5529 : * existing lock, there's an optimization possible: consider only the
5530 : * strongest of both locks as the only one present, and restart.
5531 : */
5532 209874 : if (xmax == add_to_xmax)
5533 : {
5534 : /*
5535 : * Note that it's not possible for the original tuple to be
5536 : * updated: we wouldn't be here because the tuple would have been
5537 : * invisible and we wouldn't try to update it. As a subtlety,
5538 : * this code can also run when traversing an update chain to lock
5539 : * future versions of a tuple. But we wouldn't be here either,
5540 : * because the add_to_xmax would be different from the original
5541 : * updater.
5542 : */
5543 : Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5544 :
5545 : /* acquire the strongest of both */
5546 207840 : if (mode < old_mode)
5547 104372 : mode = old_mode;
5548 : /* mustn't touch is_update */
5549 :
5550 207840 : old_infomask |= HEAP_XMAX_INVALID;
5551 207840 : goto l5;
5552 : }
5553 :
5554 : /* otherwise, just fall back to creating a new multixact */
5555 2034 : new_status = get_mxact_status_for_lock(mode, is_update);
5556 2034 : new_xmax = MultiXactIdCreate(xmax, old_status,
5557 : add_to_xmax, new_status);
5558 2034 : GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5559 : }
5560 28 : else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
5561 10 : TransactionIdDidCommit(xmax))
5562 2 : {
5563 : /*
5564 : * It's a committed update, so we gotta preserve him as updater of the
5565 : * tuple.
5566 : */
5567 : MultiXactStatus status;
5568 : MultiXactStatus new_status;
5569 :
5570 2 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5571 0 : status = MultiXactStatusUpdate;
5572 : else
5573 2 : status = MultiXactStatusNoKeyUpdate;
5574 :
5575 2 : new_status = get_mxact_status_for_lock(mode, is_update);
5576 :
5577 : /*
5578 : * since it's not running, it's obviously impossible for the old
5579 : * updater to be identical to the current one, so we need not check
5580 : * for that case as we do in the block above.
5581 : */
5582 2 : new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5583 2 : GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5584 : }
5585 : else
5586 : {
5587 : /*
5588 : * Can get here iff the locking/updating transaction was running when
5589 : * the infomask was extracted from the tuple, but finished before
5590 : * TransactionIdIsInProgress got to run. Deal with it as if there was
5591 : * no locker at all in the first place.
5592 : */
5593 16 : old_infomask |= HEAP_XMAX_INVALID;
5594 16 : goto l5;
5595 : }
5596 :
5597 4064498 : *result_infomask = new_infomask;
5598 4064498 : *result_infomask2 = new_infomask2;
5599 4064498 : *result_xmax = new_xmax;
5600 4064498 : }
5601 :
5602 : /*
5603 : * Subroutine for heap_lock_updated_tuple_rec.
5604 : *
5605 : * Given a hypothetical multixact status held by the transaction identified
5606 : * with the given xid, does the current transaction need to wait, fail, or can
5607 : * it continue if it wanted to acquire a lock of the given mode? "needwait"
5608 : * is set to true if waiting is necessary; if it can continue, then TM_Ok is
5609 : * returned. If the lock is already held by the current transaction, return
5610 : * TM_SelfModified. In case of a conflict with another transaction, a
5611 : * different HeapTupleSatisfiesUpdate return code is returned.
5612 : *
5613 : * The held status is said to be hypothetical because it might correspond to a
5614 : * lock held by a single Xid, i.e. not a real MultiXactId; we express it this
5615 : * way for simplicity of API.
5616 : */
5617 : static TM_Result
5618 64 : test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
5619 : LockTupleMode mode, HeapTuple tup,
5620 : bool *needwait)
5621 : {
5622 : MultiXactStatus wantedstatus;
5623 :
5624 64 : *needwait = false;
5625 64 : wantedstatus = get_mxact_status_for_lock(mode, false);
5626 :
5627 : /*
5628 : * Note: we *must* check TransactionIdIsInProgress before
5629 : * TransactionIdDidAbort/Commit; see comment at top of heapam_visibility.c
5630 : * for an explanation.
5631 : */
5632 64 : if (TransactionIdIsCurrentTransactionId(xid))
5633 : {
5634 : /*
5635 : * The tuple has already been locked by our own transaction. This is
5636 : * very rare but can happen if multiple transactions are trying to
5637 : * lock an ancient version of the same tuple.
5638 : */
5639 0 : return TM_SelfModified;
5640 : }
5641 64 : else if (TransactionIdIsInProgress(xid))
5642 : {
5643 : /*
5644 : * If the locking transaction is running, what we do depends on
5645 : * whether the lock modes conflict: if they do, then we must wait for
5646 : * it to finish; otherwise we can fall through to lock this tuple
5647 : * version without waiting.
5648 : */
5649 32 : if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5650 32 : LOCKMODE_from_mxstatus(wantedstatus)))
5651 : {
5652 16 : *needwait = true;
5653 : }
5654 :
5655 : /*
5656 : * If we set needwait above, then this value doesn't matter;
5657 : * otherwise, this value signals to caller that it's okay to proceed.
5658 : */
5659 32 : return TM_Ok;
5660 : }
5661 32 : else if (TransactionIdDidAbort(xid))
5662 6 : return TM_Ok;
5663 26 : else if (TransactionIdDidCommit(xid))
5664 : {
5665 : /*
5666 : * The other transaction committed. If it was only a locker, then the
5667 : * lock is completely gone now and we can return success; but if it
5668 : * was an update, then what we do depends on whether the two lock
5669 : * modes conflict. If they conflict, then we must report error to
5670 : * caller. But if they don't, we can fall through to allow the current
5671 : * transaction to lock the tuple.
5672 : *
5673 : * Note: the reason we worry about ISUPDATE here is because as soon as
5674 : * a transaction ends, all its locks are gone and meaningless, and
5675 : * thus we can ignore them; whereas its updates persist. In the
5676 : * TransactionIdIsInProgress case, above, we don't need to check
5677 : * because we know the lock is still "alive" and thus a conflict needs
5678 : * always be checked.
5679 : */
5680 26 : if (!ISUPDATE_from_mxstatus(status))
5681 8 : return TM_Ok;
5682 :
5683 18 : if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5684 18 : LOCKMODE_from_mxstatus(wantedstatus)))
5685 : {
5686 : /* bummer */
5687 16 : if (!ItemPointerEquals(&tup->t_self, &tup->t_data->t_ctid))
5688 12 : return TM_Updated;
5689 : else
5690 4 : return TM_Deleted;
5691 : }
5692 :
5693 2 : return TM_Ok;
5694 : }
5695 :
5696 : /* Not in progress, not aborted, not committed -- must have crashed */
5697 0 : return TM_Ok;
5698 : }
5699 :
5700 :
5701 : /*
5702 : * Recursive part of heap_lock_updated_tuple
5703 : *
5704 : * Fetch the tuple pointed to by tid in rel, and mark it as locked by the given
5705 : * xid with the given mode; if this tuple is updated, recurse to lock the new
5706 : * version as well.
5707 : */
5708 : static TM_Result
5709 176 : heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
5710 : LockTupleMode mode)
5711 : {
5712 : TM_Result result;
5713 : ItemPointerData tupid;
5714 : HeapTupleData mytup;
5715 : Buffer buf;
5716 : uint16 new_infomask,
5717 : new_infomask2,
5718 : old_infomask,
5719 : old_infomask2;
5720 : TransactionId xmax,
5721 : new_xmax;
5722 176 : TransactionId priorXmax = InvalidTransactionId;
5723 176 : bool cleared_all_frozen = false;
5724 : bool pinned_desired_page;
5725 176 : Buffer vmbuffer = InvalidBuffer;
5726 : BlockNumber block;
5727 :
5728 176 : ItemPointerCopy(tid, &tupid);
5729 :
5730 : for (;;)
5731 : {
5732 182 : new_infomask = 0;
5733 182 : new_xmax = InvalidTransactionId;
5734 182 : block = ItemPointerGetBlockNumber(&tupid);
5735 182 : ItemPointerCopy(&tupid, &(mytup.t_self));
5736 :
5737 182 : if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false))
5738 : {
5739 : /*
5740 : * if we fail to find the updated version of the tuple, it's
5741 : * because it was vacuumed/pruned away after its creator
5742 : * transaction aborted. So behave as if we got to the end of the
5743 : * chain, and there's no further tuple to lock: return success to
5744 : * caller.
5745 : */
5746 0 : result = TM_Ok;
5747 0 : goto out_unlocked;
5748 : }
5749 :
5750 182 : l4:
5751 198 : CHECK_FOR_INTERRUPTS();
5752 :
5753 : /*
5754 : * Before locking the buffer, pin the visibility map page if it
5755 : * appears to be necessary. Since we haven't got the lock yet,
5756 : * someone else might be in the middle of changing this, so we'll need
5757 : * to recheck after we have the lock.
5758 : */
5759 198 : if (PageIsAllVisible(BufferGetPage(buf)))
5760 : {
5761 0 : visibilitymap_pin(rel, block, &vmbuffer);
5762 0 : pinned_desired_page = true;
5763 : }
5764 : else
5765 198 : pinned_desired_page = false;
5766 :
5767 198 : LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
5768 :
5769 : /*
5770 : * If we didn't pin the visibility map page and the page has become
5771 : * all visible while we were busy locking the buffer, we'll have to
5772 : * unlock and re-lock, to avoid holding the buffer lock across I/O.
5773 : * That's a bit unfortunate, but hopefully shouldn't happen often.
5774 : *
5775 : * Note: in some paths through this function, we will reach here
5776 : * holding a pin on a vm page that may or may not be the one matching
5777 : * this page. If this page isn't all-visible, we won't use the vm
5778 : * page, but we hold onto such a pin till the end of the function.
5779 : */
5780 198 : if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
5781 : {
5782 0 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5783 0 : visibilitymap_pin(rel, block, &vmbuffer);
5784 0 : LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
5785 : }
5786 :
5787 : /*
5788 : * Check the tuple XMIN against prior XMAX, if any. If we reached the
5789 : * end of the chain, we're done, so return success.
5790 : */
5791 204 : if (TransactionIdIsValid(priorXmax) &&
5792 6 : !TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data),
5793 : priorXmax))
5794 : {
5795 0 : result = TM_Ok;
5796 0 : goto out_locked;
5797 : }
5798 :
5799 : /*
5800 : * Also check Xmin: if this tuple was created by an aborted
5801 : * (sub)transaction, then we already locked the last live one in the
5802 : * chain, thus we're done, so return success.
5803 : */
5804 198 : if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
5805 : {
5806 26 : result = TM_Ok;
5807 26 : goto out_locked;
5808 : }
5809 :
5810 172 : old_infomask = mytup.t_data->t_infomask;
5811 172 : old_infomask2 = mytup.t_data->t_infomask2;
5812 172 : xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5813 :
5814 : /*
5815 : * If this tuple version has been updated or locked by some concurrent
5816 : * transaction(s), what we do depends on whether our lock mode
5817 : * conflicts with what those other transactions hold, and also on the
5818 : * status of them.
5819 : */
5820 172 : if (!(old_infomask & HEAP_XMAX_INVALID))
5821 : {
5822 : TransactionId rawxmax;
5823 : bool needwait;
5824 :
5825 60 : rawxmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5826 60 : if (old_infomask & HEAP_XMAX_IS_MULTI)
5827 : {
5828 : int nmembers;
5829 : int i;
5830 : MultiXactMember *members;
5831 :
5832 : /*
5833 : * We don't need a test for pg_upgrade'd tuples: this is only
5834 : * applied to tuples after the first in an update chain. Said
5835 : * first tuple in the chain may well be locked-in-9.2-and-
5836 : * pg_upgraded, but that one was already locked by our caller,
5837 : * not us; and any subsequent ones cannot be because our
5838 : * caller must necessarily have obtained a snapshot later than
5839 : * the pg_upgrade itself.
5840 : */
5841 : Assert(!HEAP_LOCKED_UPGRADED(mytup.t_data->t_infomask));
5842 :
5843 2 : nmembers = GetMultiXactIdMembers(rawxmax, &members, false,
5844 2 : HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5845 8 : for (i = 0; i < nmembers; i++)
5846 : {
5847 6 : result = test_lockmode_for_conflict(members[i].status,
5848 6 : members[i].xid,
5849 : mode,
5850 : &mytup,
5851 : &needwait);
5852 :
5853 : /*
5854 : * If the tuple was already locked by ourselves in a
5855 : * previous iteration of this (say heap_lock_tuple was
5856 : * forced to restart the locking loop because of a change
5857 : * in xmax), then we hold the lock already on this tuple
5858 : * version and we don't need to do anything; and this is
5859 : * not an error condition either. We just need to skip
5860 : * this tuple and continue locking the next version in the
5861 : * update chain.
5862 : */
5863 6 : if (result == TM_SelfModified)
5864 : {
5865 0 : pfree(members);
5866 0 : goto next;
5867 : }
5868 :
5869 6 : if (needwait)
5870 : {
5871 0 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5872 0 : XactLockTableWait(members[i].xid, rel,
5873 : &mytup.t_self,
5874 : XLTW_LockUpdated);
5875 0 : pfree(members);
5876 0 : goto l4;
5877 : }
5878 6 : if (result != TM_Ok)
5879 : {
5880 0 : pfree(members);
5881 0 : goto out_locked;
5882 : }
5883 : }
5884 2 : if (members)
5885 2 : pfree(members);
5886 : }
5887 : else
5888 : {
5889 : MultiXactStatus status;
5890 :
5891 : /*
5892 : * For a non-multi Xmax, we first need to compute the
5893 : * corresponding MultiXactStatus by using the infomask bits.
5894 : */
5895 58 : if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5896 : {
5897 20 : if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5898 20 : status = MultiXactStatusForKeyShare;
5899 0 : else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5900 0 : status = MultiXactStatusForShare;
5901 0 : else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5902 : {
5903 0 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5904 0 : status = MultiXactStatusForUpdate;
5905 : else
5906 0 : status = MultiXactStatusForNoKeyUpdate;
5907 : }
5908 : else
5909 : {
5910 : /*
5911 : * LOCK_ONLY present alone (a pg_upgraded tuple marked
5912 : * as share-locked in the old cluster) shouldn't be
5913 : * seen in the middle of an update chain.
5914 : */
5915 0 : elog(ERROR, "invalid lock status in tuple");
5916 : }
5917 : }
5918 : else
5919 : {
5920 : /* it's an update, but which kind? */
5921 38 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5922 28 : status = MultiXactStatusUpdate;
5923 : else
5924 10 : status = MultiXactStatusNoKeyUpdate;
5925 : }
5926 :
5927 58 : result = test_lockmode_for_conflict(status, rawxmax, mode,
5928 : &mytup, &needwait);
5929 :
5930 : /*
5931 : * If the tuple was already locked by ourselves in a previous
5932 : * iteration of this (say heap_lock_tuple was forced to
5933 : * restart the locking loop because of a change in xmax), then
5934 : * we hold the lock already on this tuple version and we don't
5935 : * need to do anything; and this is not an error condition
5936 : * either. We just need to skip this tuple and continue
5937 : * locking the next version in the update chain.
5938 : */
5939 58 : if (result == TM_SelfModified)
5940 0 : goto next;
5941 :
5942 58 : if (needwait)
5943 : {
5944 16 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5945 16 : XactLockTableWait(rawxmax, rel, &mytup.t_self,
5946 : XLTW_LockUpdated);
5947 16 : goto l4;
5948 : }
5949 42 : if (result != TM_Ok)
5950 : {
5951 16 : goto out_locked;
5952 : }
5953 : }
5954 : }
5955 :
5956 : /* compute the new Xmax and infomask values for the tuple ... */
5957 140 : compute_new_xmax_infomask(xmax, old_infomask, mytup.t_data->t_infomask2,
5958 : xid, mode, false,
5959 : &new_xmax, &new_infomask, &new_infomask2);
5960 :
5961 140 : if (PageIsAllVisible(BufferGetPage(buf)) &&
5962 0 : visibilitymap_clear(rel, block, vmbuffer,
5963 : VISIBILITYMAP_ALL_FROZEN))
5964 0 : cleared_all_frozen = true;
5965 :
5966 140 : START_CRIT_SECTION();
5967 :
5968 : /* ... and set them */
5969 140 : HeapTupleHeaderSetXmax(mytup.t_data, new_xmax);
5970 140 : mytup.t_data->t_infomask &= ~HEAP_XMAX_BITS;
5971 140 : mytup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5972 140 : mytup.t_data->t_infomask |= new_infomask;
5973 140 : mytup.t_data->t_infomask2 |= new_infomask2;
5974 :
5975 140 : MarkBufferDirty(buf);
5976 :
5977 : /* XLOG stuff */
5978 140 : if (RelationNeedsWAL(rel))
5979 : {
5980 : xl_heap_lock_updated xlrec;
5981 : XLogRecPtr recptr;
5982 140 : Page page = BufferGetPage(buf);
5983 :
5984 140 : XLogBeginInsert();
5985 140 : XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
5986 :
5987 140 : xlrec.offnum = ItemPointerGetOffsetNumber(&mytup.t_self);
5988 140 : xlrec.xmax = new_xmax;
5989 140 : xlrec.infobits_set = compute_infobits(new_infomask, new_infomask2);
5990 140 : xlrec.flags =
5991 140 : cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
5992 :
5993 140 : XLogRegisterData(&xlrec, SizeOfHeapLockUpdated);
5994 :
5995 140 : recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOCK_UPDATED);
5996 :
5997 140 : PageSetLSN(page, recptr);
5998 : }
5999 :
6000 140 : END_CRIT_SECTION();
6001 :
6002 140 : next:
6003 : /* if we find the end of update chain, we're done. */
6004 280 : if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
6005 280 : HeapTupleHeaderIndicatesMovedPartitions(mytup.t_data) ||
6006 148 : ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
6007 8 : HeapTupleHeaderIsOnlyLocked(mytup.t_data))
6008 : {
6009 134 : result = TM_Ok;
6010 134 : goto out_locked;
6011 : }
6012 :
6013 : /* tail recursion */
6014 6 : priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
6015 6 : ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
6016 6 : UnlockReleaseBuffer(buf);
6017 : }
6018 :
6019 : result = TM_Ok;
6020 :
6021 176 : out_locked:
6022 176 : UnlockReleaseBuffer(buf);
6023 :
6024 176 : out_unlocked:
6025 176 : if (vmbuffer != InvalidBuffer)
6026 0 : ReleaseBuffer(vmbuffer);
6027 :
6028 176 : return result;
6029 : }
6030 :
6031 : /*
6032 : * heap_lock_updated_tuple
6033 : * Follow update chain when locking an updated tuple, acquiring locks (row
6034 : * marks) on the updated versions.
6035 : *
6036 : * The initial tuple is assumed to be already locked.
6037 : *
6038 : * This function doesn't check visibility, it just unconditionally marks the
6039 : * tuple(s) as locked. If any tuple in the updated chain is being deleted
6040 : * concurrently (or updated with the key being modified), sleep until the
6041 : * transaction doing it is finished.
6042 : *
6043 : * Note that we don't acquire heavyweight tuple locks on the tuples we walk
6044 : * when we have to wait for other transactions to release them, as opposed to
6045 : * what heap_lock_tuple does. The reason is that having more than one
6046 : * transaction walking the chain is probably uncommon enough that risk of
6047 : * starvation is not likely: one of the preconditions for being here is that
6048 : * the snapshot in use predates the update that created this tuple (because we
6049 : * started at an earlier version of the tuple), but at the same time such a
6050 : * transaction cannot be using repeatable read or serializable isolation
6051 : * levels, because that would lead to a serializability failure.
6052 : */
6053 : static TM_Result
6054 206 : heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
6055 : TransactionId xid, LockTupleMode mode)
6056 : {
6057 : /*
6058 : * If the tuple has not been updated, or has moved into another partition
6059 : * (effectively a delete) stop here.
6060 : */
6061 206 : if (!HeapTupleHeaderIndicatesMovedPartitions(tuple->t_data) &&
6062 202 : !ItemPointerEquals(&tuple->t_self, ctid))
6063 : {
6064 : /*
6065 : * If this is the first possibly-multixact-able operation in the
6066 : * current transaction, set my per-backend OldestMemberMXactId
6067 : * setting. We can be certain that the transaction will never become a
6068 : * member of any older MultiXactIds than that. (We have to do this
6069 : * even if we end up just using our own TransactionId below, since
6070 : * some other backend could incorporate our XID into a MultiXact
6071 : * immediately afterwards.)
6072 : */
6073 176 : MultiXactIdSetOldestMember();
6074 :
6075 176 : return heap_lock_updated_tuple_rec(rel, ctid, xid, mode);
6076 : }
6077 :
6078 : /* nothing to lock */
6079 30 : return TM_Ok;
6080 : }
6081 :
6082 : /*
6083 : * heap_finish_speculative - mark speculative insertion as successful
6084 : *
6085 : * To successfully finish a speculative insertion we have to clear speculative
6086 : * token from tuple. To do so the t_ctid field, which will contain a
6087 : * speculative token value, is modified in place to point to the tuple itself,
6088 : * which is characteristic of a newly inserted ordinary tuple.
6089 : *
6090 : * NB: It is not ok to commit without either finishing or aborting a
6091 : * speculative insertion. We could treat speculative tuples of committed
6092 : * transactions implicitly as completed, but then we would have to be prepared
6093 : * to deal with speculative tokens on committed tuples. That wouldn't be
6094 : * difficult - no-one looks at the ctid field of a tuple with invalid xmax -
6095 : * but clearing the token at completion isn't very expensive either.
6096 : * An explicit confirmation WAL record also makes logical decoding simpler.
6097 : */
6098 : void
6099 4124 : heap_finish_speculative(Relation relation, ItemPointer tid)
6100 : {
6101 : Buffer buffer;
6102 : Page page;
6103 : OffsetNumber offnum;
6104 4124 : ItemId lp = NULL;
6105 : HeapTupleHeader htup;
6106 :
6107 4124 : buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
6108 4124 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6109 4124 : page = BufferGetPage(buffer);
6110 :
6111 4124 : offnum = ItemPointerGetOffsetNumber(tid);
6112 4124 : if (PageGetMaxOffsetNumber(page) >= offnum)
6113 4124 : lp = PageGetItemId(page, offnum);
6114 :
6115 4124 : if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6116 0 : elog(ERROR, "invalid lp");
6117 :
6118 4124 : htup = (HeapTupleHeader) PageGetItem(page, lp);
6119 :
6120 : /* NO EREPORT(ERROR) from here till changes are logged */
6121 4124 : START_CRIT_SECTION();
6122 :
6123 : Assert(HeapTupleHeaderIsSpeculative(htup));
6124 :
6125 4124 : MarkBufferDirty(buffer);
6126 :
6127 : /*
6128 : * Replace the speculative insertion token with a real t_ctid, pointing to
6129 : * itself like it does on regular tuples.
6130 : */
6131 4124 : htup->t_ctid = *tid;
6132 :
6133 : /* XLOG stuff */
6134 4124 : if (RelationNeedsWAL(relation))
6135 : {
6136 : xl_heap_confirm xlrec;
6137 : XLogRecPtr recptr;
6138 :
6139 4106 : xlrec.offnum = ItemPointerGetOffsetNumber(tid);
6140 :
6141 4106 : XLogBeginInsert();
6142 :
6143 : /* We want the same filtering on this as on a plain insert */
6144 4106 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
6145 :
6146 4106 : XLogRegisterData(&xlrec, SizeOfHeapConfirm);
6147 4106 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6148 :
6149 4106 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
6150 :
6151 4106 : PageSetLSN(page, recptr);
6152 : }
6153 :
6154 4124 : END_CRIT_SECTION();
6155 :
6156 4124 : UnlockReleaseBuffer(buffer);
6157 4124 : }
6158 :
6159 : /*
6160 : * heap_abort_speculative - kill a speculatively inserted tuple
6161 : *
6162 : * Marks a tuple that was speculatively inserted in the same command as dead,
6163 : * by setting its xmin as invalid. That makes it immediately appear as dead
6164 : * to all transactions, including our own. In particular, it makes
6165 : * HeapTupleSatisfiesDirty() regard the tuple as dead, so that another backend
6166 : * inserting a duplicate key value won't unnecessarily wait for our whole
6167 : * transaction to finish (it'll just wait for our speculative insertion to
6168 : * finish).
6169 : *
6170 : * Killing the tuple prevents "unprincipled deadlocks", which are deadlocks
6171 : * that arise due to a mutual dependency that is not user visible. By
6172 : * definition, unprincipled deadlocks cannot be prevented by the user
6173 : * reordering lock acquisition in client code, because the implementation level
6174 : * lock acquisitions are not under the user's direct control. If speculative
6175 : * inserters did not take this precaution, then under high concurrency they
6176 : * could deadlock with each other, which would not be acceptable.
6177 : *
6178 : * This is somewhat redundant with heap_delete, but we prefer to have a
6179 : * dedicated routine with stripped down requirements. Note that this is also
6180 : * used to delete the TOAST tuples created during speculative insertion.
6181 : *
6182 : * This routine does not affect logical decoding as it only looks at
6183 : * confirmation records.
6184 : */
6185 : void
6186 20 : heap_abort_speculative(Relation relation, ItemPointer tid)
6187 : {
6188 20 : TransactionId xid = GetCurrentTransactionId();
6189 : ItemId lp;
6190 : HeapTupleData tp;
6191 : Page page;
6192 : BlockNumber block;
6193 : Buffer buffer;
6194 :
6195 : Assert(ItemPointerIsValid(tid));
6196 :
6197 20 : block = ItemPointerGetBlockNumber(tid);
6198 20 : buffer = ReadBuffer(relation, block);
6199 20 : page = BufferGetPage(buffer);
6200 :
6201 20 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6202 :
6203 : /*
6204 : * Page can't be all visible, we just inserted into it, and are still
6205 : * running.
6206 : */
6207 : Assert(!PageIsAllVisible(page));
6208 :
6209 20 : lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
6210 : Assert(ItemIdIsNormal(lp));
6211 :
6212 20 : tp.t_tableOid = RelationGetRelid(relation);
6213 20 : tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
6214 20 : tp.t_len = ItemIdGetLength(lp);
6215 20 : tp.t_self = *tid;
6216 :
6217 : /*
6218 : * Sanity check that the tuple really is a speculatively inserted tuple,
6219 : * inserted by us.
6220 : */
6221 20 : if (tp.t_data->t_choice.t_heap.t_xmin != xid)
6222 0 : elog(ERROR, "attempted to kill a tuple inserted by another transaction");
6223 20 : if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
6224 0 : elog(ERROR, "attempted to kill a non-speculative tuple");
6225 : Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
6226 :
6227 : /*
6228 : * No need to check for serializable conflicts here. There is never a
6229 : * need for a combo CID, either. No need to extract replica identity, or
6230 : * do anything special with infomask bits.
6231 : */
6232 :
6233 20 : START_CRIT_SECTION();
6234 :
6235 : /*
6236 : * The tuple will become DEAD immediately. Flag that this page is a
6237 : * candidate for pruning by setting xmin to TransactionXmin. While not
6238 : * immediately prunable, it is the oldest xid we can cheaply determine
6239 : * that's safe against wraparound / being older than the table's
6240 : * relfrozenxid. To defend against the unlikely case of a new relation
6241 : * having a newer relfrozenxid than our TransactionXmin, use relfrozenxid
6242 : * if so (vacuum can't subsequently move relfrozenxid to beyond
6243 : * TransactionXmin, so there's no race here).
6244 : */
6245 : Assert(TransactionIdIsValid(TransactionXmin));
6246 : {
6247 20 : TransactionId relfrozenxid = relation->rd_rel->relfrozenxid;
6248 : TransactionId prune_xid;
6249 :
6250 20 : if (TransactionIdPrecedes(TransactionXmin, relfrozenxid))
6251 0 : prune_xid = relfrozenxid;
6252 : else
6253 20 : prune_xid = TransactionXmin;
6254 20 : PageSetPrunable(page, prune_xid);
6255 : }
6256 :
6257 : /* store transaction information of xact deleting the tuple */
6258 20 : tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
6259 20 : tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
6260 :
6261 : /*
6262 : * Set the tuple header xmin to InvalidTransactionId. This makes the
6263 : * tuple immediately invisible everyone. (In particular, to any
6264 : * transactions waiting on the speculative token, woken up later.)
6265 : */
6266 20 : HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
6267 :
6268 : /* Clear the speculative insertion token too */
6269 20 : tp.t_data->t_ctid = tp.t_self;
6270 :
6271 20 : MarkBufferDirty(buffer);
6272 :
6273 : /*
6274 : * XLOG stuff
6275 : *
6276 : * The WAL records generated here match heap_delete(). The same recovery
6277 : * routines are used.
6278 : */
6279 20 : if (RelationNeedsWAL(relation))
6280 : {
6281 : xl_heap_delete xlrec;
6282 : XLogRecPtr recptr;
6283 :
6284 20 : xlrec.flags = XLH_DELETE_IS_SUPER;
6285 40 : xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
6286 20 : tp.t_data->t_infomask2);
6287 20 : xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
6288 20 : xlrec.xmax = xid;
6289 :
6290 20 : XLogBeginInsert();
6291 20 : XLogRegisterData(&xlrec, SizeOfHeapDelete);
6292 20 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6293 :
6294 : /* No replica identity & replication origin logged */
6295 :
6296 20 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
6297 :
6298 20 : PageSetLSN(page, recptr);
6299 : }
6300 :
6301 20 : END_CRIT_SECTION();
6302 :
6303 20 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6304 :
6305 20 : if (HeapTupleHasExternal(&tp))
6306 : {
6307 : Assert(!IsToastRelation(relation));
6308 2 : heap_toast_delete(relation, &tp, true);
6309 : }
6310 :
6311 : /*
6312 : * Never need to mark tuple for invalidation, since catalogs don't support
6313 : * speculative insertion
6314 : */
6315 :
6316 : /* Now we can release the buffer */
6317 20 : ReleaseBuffer(buffer);
6318 :
6319 : /* count deletion, as we counted the insertion too */
6320 20 : pgstat_count_heap_delete(relation);
6321 20 : }
6322 :
6323 : /*
6324 : * heap_inplace_lock - protect inplace update from concurrent heap_update()
6325 : *
6326 : * Evaluate whether the tuple's state is compatible with a no-key update.
6327 : * Current transaction rowmarks are fine, as is KEY SHARE from any
6328 : * transaction. If compatible, return true with the buffer exclusive-locked,
6329 : * and the caller must release that by calling
6330 : * heap_inplace_update_and_unlock(), calling heap_inplace_unlock(), or raising
6331 : * an error. Otherwise, call release_callback(arg), wait for blocking
6332 : * transactions to end, and return false.
6333 : *
6334 : * Since this is intended for system catalogs and SERIALIZABLE doesn't cover
6335 : * DDL, this doesn't guarantee any particular predicate locking.
6336 : *
6337 : * One could modify this to return true for tuples with delete in progress,
6338 : * All inplace updaters take a lock that conflicts with DROP. If explicit
6339 : * "DELETE FROM pg_class" is in progress, we'll wait for it like we would an
6340 : * update.
6341 : *
6342 : * Readers of inplace-updated fields expect changes to those fields are
6343 : * durable. For example, vac_truncate_clog() reads datfrozenxid from
6344 : * pg_database tuples via catalog snapshots. A future snapshot must not
6345 : * return a lower datfrozenxid for the same database OID (lower in the
6346 : * FullTransactionIdPrecedes() sense). We achieve that since no update of a
6347 : * tuple can start while we hold a lock on its buffer. In cases like
6348 : * BEGIN;GRANT;CREATE INDEX;COMMIT we're inplace-updating a tuple visible only
6349 : * to this transaction. ROLLBACK then is one case where it's okay to lose
6350 : * inplace updates. (Restoring relhasindex=false on ROLLBACK is fine, since
6351 : * any concurrent CREATE INDEX would have blocked, then inplace-updated the
6352 : * committed tuple.)
6353 : *
6354 : * In principle, we could avoid waiting by overwriting every tuple in the
6355 : * updated tuple chain. Reader expectations permit updating a tuple only if
6356 : * it's aborted, is the tail of the chain, or we already updated the tuple
6357 : * referenced in its t_ctid. Hence, we would need to overwrite the tuples in
6358 : * order from tail to head. That would imply either (a) mutating all tuples
6359 : * in one critical section or (b) accepting a chance of partial completion.
6360 : * Partial completion of a relfrozenxid update would have the weird
6361 : * consequence that the table's next VACUUM could see the table's relfrozenxid
6362 : * move forward between vacuum_get_cutoffs() and finishing.
6363 : */
6364 : bool
6365 377432 : heap_inplace_lock(Relation relation,
6366 : HeapTuple oldtup_ptr, Buffer buffer,
6367 : void (*release_callback) (void *), void *arg)
6368 : {
6369 377432 : HeapTupleData oldtup = *oldtup_ptr; /* minimize diff vs. heap_update() */
6370 : TM_Result result;
6371 : bool ret;
6372 :
6373 : #ifdef USE_ASSERT_CHECKING
6374 : if (RelationGetRelid(relation) == RelationRelationId)
6375 : check_inplace_rel_lock(oldtup_ptr);
6376 : #endif
6377 :
6378 : Assert(BufferIsValid(buffer));
6379 :
6380 : /*
6381 : * Construct shared cache inval if necessary. Because we pass a tuple
6382 : * version without our own inplace changes or inplace changes other
6383 : * sessions complete while we wait for locks, inplace update mustn't
6384 : * change catcache lookup keys. But we aren't bothering with index
6385 : * updates either, so that's true a fortiori. After LockBuffer(), it
6386 : * would be too late, because this might reach a
6387 : * CatalogCacheInitializeCache() that locks "buffer".
6388 : */
6389 377432 : CacheInvalidateHeapTupleInplace(relation, oldtup_ptr, NULL);
6390 :
6391 377432 : LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
6392 377432 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6393 :
6394 : /*----------
6395 : * Interpret HeapTupleSatisfiesUpdate() like heap_update() does, except:
6396 : *
6397 : * - wait unconditionally
6398 : * - already locked tuple above, since inplace needs that unconditionally
6399 : * - don't recheck header after wait: simpler to defer to next iteration
6400 : * - don't try to continue even if the updater aborts: likewise
6401 : * - no crosscheck
6402 : */
6403 377432 : result = HeapTupleSatisfiesUpdate(&oldtup, GetCurrentCommandId(false),
6404 : buffer);
6405 :
6406 377432 : if (result == TM_Invisible)
6407 : {
6408 : /* no known way this can happen */
6409 0 : ereport(ERROR,
6410 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
6411 : errmsg_internal("attempted to overwrite invisible tuple")));
6412 : }
6413 377432 : else if (result == TM_SelfModified)
6414 : {
6415 : /*
6416 : * CREATE INDEX might reach this if an expression is silly enough to
6417 : * call e.g. SELECT ... FROM pg_class FOR SHARE. C code of other SQL
6418 : * statements might get here after a heap_update() of the same row, in
6419 : * the absence of an intervening CommandCounterIncrement().
6420 : */
6421 0 : ereport(ERROR,
6422 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
6423 : errmsg("tuple to be updated was already modified by an operation triggered by the current command")));
6424 : }
6425 377432 : else if (result == TM_BeingModified)
6426 : {
6427 : TransactionId xwait;
6428 : uint16 infomask;
6429 :
6430 182 : xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
6431 182 : infomask = oldtup.t_data->t_infomask;
6432 :
6433 182 : if (infomask & HEAP_XMAX_IS_MULTI)
6434 : {
6435 10 : LockTupleMode lockmode = LockTupleNoKeyExclusive;
6436 10 : MultiXactStatus mxact_status = MultiXactStatusNoKeyUpdate;
6437 : int remain;
6438 :
6439 10 : if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
6440 : lockmode, NULL))
6441 : {
6442 4 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6443 4 : release_callback(arg);
6444 4 : ret = false;
6445 4 : MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
6446 : relation, &oldtup.t_self, XLTW_Update,
6447 : &remain);
6448 : }
6449 : else
6450 6 : ret = true;
6451 : }
6452 172 : else if (TransactionIdIsCurrentTransactionId(xwait))
6453 2 : ret = true;
6454 170 : else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
6455 2 : ret = true;
6456 : else
6457 : {
6458 168 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6459 168 : release_callback(arg);
6460 168 : ret = false;
6461 168 : XactLockTableWait(xwait, relation, &oldtup.t_self,
6462 : XLTW_Update);
6463 : }
6464 : }
6465 : else
6466 : {
6467 377250 : ret = (result == TM_Ok);
6468 377250 : if (!ret)
6469 : {
6470 2 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6471 2 : release_callback(arg);
6472 : }
6473 : }
6474 :
6475 : /*
6476 : * GetCatalogSnapshot() relies on invalidation messages to know when to
6477 : * take a new snapshot. COMMIT of xwait is responsible for sending the
6478 : * invalidation. We're not acquiring heavyweight locks sufficient to
6479 : * block if not yet sent, so we must take a new snapshot to ensure a later
6480 : * attempt has a fair chance. While we don't need this if xwait aborted,
6481 : * don't bother optimizing that.
6482 : */
6483 377432 : if (!ret)
6484 : {
6485 174 : UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
6486 174 : ForgetInplace_Inval();
6487 174 : InvalidateCatalogSnapshot();
6488 : }
6489 377432 : return ret;
6490 : }
6491 :
6492 : /*
6493 : * heap_inplace_update_and_unlock - core of systable_inplace_update_finish
6494 : *
6495 : * The tuple cannot change size, and therefore its header fields and null
6496 : * bitmap (if any) don't change either.
6497 : *
6498 : * Since we hold LOCKTAG_TUPLE, no updater has a local copy of this tuple.
6499 : */
6500 : void
6501 161710 : heap_inplace_update_and_unlock(Relation relation,
6502 : HeapTuple oldtup, HeapTuple tuple,
6503 : Buffer buffer)
6504 : {
6505 161710 : HeapTupleHeader htup = oldtup->t_data;
6506 : uint32 oldlen;
6507 : uint32 newlen;
6508 : char *dst;
6509 : char *src;
6510 161710 : int nmsgs = 0;
6511 161710 : SharedInvalidationMessage *invalMessages = NULL;
6512 161710 : bool RelcacheInitFileInval = false;
6513 :
6514 : Assert(ItemPointerEquals(&oldtup->t_self, &tuple->t_self));
6515 161710 : oldlen = oldtup->t_len - htup->t_hoff;
6516 161710 : newlen = tuple->t_len - tuple->t_data->t_hoff;
6517 161710 : if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
6518 0 : elog(ERROR, "wrong tuple length");
6519 :
6520 161710 : dst = (char *) htup + htup->t_hoff;
6521 161710 : src = (char *) tuple->t_data + tuple->t_data->t_hoff;
6522 :
6523 : /* Like RecordTransactionCommit(), log only if needed */
6524 161710 : if (XLogStandbyInfoActive())
6525 99624 : nmsgs = inplaceGetInvalidationMessages(&invalMessages,
6526 : &RelcacheInitFileInval);
6527 :
6528 : /*
6529 : * Unlink relcache init files as needed. If unlinking, acquire
6530 : * RelCacheInitLock until after associated invalidations. By doing this
6531 : * in advance, if we checkpoint and then crash between inplace
6532 : * XLogInsert() and inval, we don't rely on StartupXLOG() ->
6533 : * RelationCacheInitFileRemove(). That uses elevel==LOG, so replay would
6534 : * neglect to PANIC on EIO.
6535 : */
6536 161710 : PreInplace_Inval();
6537 :
6538 : /*----------
6539 : * NO EREPORT(ERROR) from here till changes are complete
6540 : *
6541 : * Our buffer lock won't stop a reader having already pinned and checked
6542 : * visibility for this tuple. Hence, we write WAL first, then mutate the
6543 : * buffer. Like in MarkBufferDirtyHint() or RecordTransactionCommit(),
6544 : * checkpoint delay makes that acceptable. With the usual order of
6545 : * changes, a crash after memcpy() and before XLogInsert() could allow
6546 : * datfrozenxid to overtake relfrozenxid:
6547 : *
6548 : * ["D" is a VACUUM (ONLY_DATABASE_STATS)]
6549 : * ["R" is a VACUUM tbl]
6550 : * D: vac_update_datfrozenxid() -> systable_beginscan(pg_class)
6551 : * D: systable_getnext() returns pg_class tuple of tbl
6552 : * R: memcpy() into pg_class tuple of tbl
6553 : * D: raise pg_database.datfrozenxid, XLogInsert(), finish
6554 : * [crash]
6555 : * [recovery restores datfrozenxid w/o relfrozenxid]
6556 : *
6557 : * Mimic MarkBufferDirtyHint() subroutine XLogSaveBufferForHint().
6558 : * Specifically, use DELAY_CHKPT_START, and copy the buffer to the stack.
6559 : * The stack copy facilitates a FPI of the post-mutation block before we
6560 : * accept other sessions seeing it. DELAY_CHKPT_START allows us to
6561 : * XLogInsert() before MarkBufferDirty(). Since XLogSaveBufferForHint()
6562 : * can operate under BUFFER_LOCK_SHARED, it can't avoid DELAY_CHKPT_START.
6563 : * This function, however, likely could avoid it with the following order
6564 : * of operations: MarkBufferDirty(), XLogInsert(), memcpy(). Opt to use
6565 : * DELAY_CHKPT_START here, too, as a way to have fewer distinct code
6566 : * patterns to analyze. Inplace update isn't so frequent that it should
6567 : * pursue the small optimization of skipping DELAY_CHKPT_START.
6568 : */
6569 : Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
6570 161710 : START_CRIT_SECTION();
6571 161710 : MyProc->delayChkptFlags |= DELAY_CHKPT_START;
6572 :
6573 : /* XLOG stuff */
6574 161710 : if (RelationNeedsWAL(relation))
6575 : {
6576 : xl_heap_inplace xlrec;
6577 : PGAlignedBlock copied_buffer;
6578 161694 : char *origdata = (char *) BufferGetBlock(buffer);
6579 161694 : Page page = BufferGetPage(buffer);
6580 161694 : uint16 lower = ((PageHeader) page)->pd_lower;
6581 161694 : uint16 upper = ((PageHeader) page)->pd_upper;
6582 : uintptr_t dst_offset_in_block;
6583 : RelFileLocator rlocator;
6584 : ForkNumber forkno;
6585 : BlockNumber blkno;
6586 : XLogRecPtr recptr;
6587 :
6588 161694 : xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6589 161694 : xlrec.dbId = MyDatabaseId;
6590 161694 : xlrec.tsId = MyDatabaseTableSpace;
6591 161694 : xlrec.relcacheInitFileInval = RelcacheInitFileInval;
6592 161694 : xlrec.nmsgs = nmsgs;
6593 :
6594 161694 : XLogBeginInsert();
6595 161694 : XLogRegisterData(&xlrec, MinSizeOfHeapInplace);
6596 161694 : if (nmsgs != 0)
6597 70324 : XLogRegisterData(invalMessages,
6598 : nmsgs * sizeof(SharedInvalidationMessage));
6599 :
6600 : /* register block matching what buffer will look like after changes */
6601 161694 : memcpy(copied_buffer.data, origdata, lower);
6602 161694 : memcpy(copied_buffer.data + upper, origdata + upper, BLCKSZ - upper);
6603 161694 : dst_offset_in_block = dst - origdata;
6604 161694 : memcpy(copied_buffer.data + dst_offset_in_block, src, newlen);
6605 161694 : BufferGetTag(buffer, &rlocator, &forkno, &blkno);
6606 : Assert(forkno == MAIN_FORKNUM);
6607 161694 : XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data,
6608 : REGBUF_STANDARD);
6609 161694 : XLogRegisterBufData(0, src, newlen);
6610 :
6611 : /* inplace updates aren't decoded atm, don't log the origin */
6612 :
6613 161694 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
6614 :
6615 161694 : PageSetLSN(page, recptr);
6616 : }
6617 :
6618 161710 : memcpy(dst, src, newlen);
6619 :
6620 161710 : MarkBufferDirty(buffer);
6621 :
6622 161710 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6623 :
6624 : /*
6625 : * Send invalidations to shared queue. SearchSysCacheLocked1() assumes we
6626 : * do this before UnlockTuple().
6627 : *
6628 : * If we're mutating a tuple visible only to this transaction, there's an
6629 : * equivalent transactional inval from the action that created the tuple,
6630 : * and this inval is superfluous.
6631 : */
6632 161710 : AtInplace_Inval();
6633 :
6634 161710 : MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
6635 161710 : END_CRIT_SECTION();
6636 161710 : UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
6637 :
6638 161710 : AcceptInvalidationMessages(); /* local processing of just-sent inval */
6639 :
6640 : /*
6641 : * Queue a transactional inval. The immediate invalidation we just sent
6642 : * is the only one known to be necessary. To reduce risk from the
6643 : * transition to immediate invalidation, continue sending a transactional
6644 : * invalidation like we've long done. Third-party code might rely on it.
6645 : */
6646 161710 : if (!IsBootstrapProcessingMode())
6647 132410 : CacheInvalidateHeapTuple(relation, tuple, NULL);
6648 161710 : }
6649 :
6650 : /*
6651 : * heap_inplace_unlock - reverse of heap_inplace_lock
6652 : */
6653 : void
6654 215548 : heap_inplace_unlock(Relation relation,
6655 : HeapTuple oldtup, Buffer buffer)
6656 : {
6657 215548 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6658 215548 : UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
6659 215548 : ForgetInplace_Inval();
6660 215548 : }
6661 :
6662 : #define FRM_NOOP 0x0001
6663 : #define FRM_INVALIDATE_XMAX 0x0002
6664 : #define FRM_RETURN_IS_XID 0x0004
6665 : #define FRM_RETURN_IS_MULTI 0x0008
6666 : #define FRM_MARK_COMMITTED 0x0010
6667 :
6668 : /*
6669 : * FreezeMultiXactId
6670 : * Determine what to do during freezing when a tuple is marked by a
6671 : * MultiXactId.
6672 : *
6673 : * "flags" is an output value; it's used to tell caller what to do on return.
6674 : * "pagefrz" is an input/output value, used to manage page level freezing.
6675 : *
6676 : * Possible values that we can set in "flags":
6677 : * FRM_NOOP
6678 : * don't do anything -- keep existing Xmax
6679 : * FRM_INVALIDATE_XMAX
6680 : * mark Xmax as InvalidTransactionId and set XMAX_INVALID flag.
6681 : * FRM_RETURN_IS_XID
6682 : * The Xid return value is a single update Xid to set as xmax.
6683 : * FRM_MARK_COMMITTED
6684 : * Xmax can be marked as HEAP_XMAX_COMMITTED
6685 : * FRM_RETURN_IS_MULTI
6686 : * The return value is a new MultiXactId to set as new Xmax.
6687 : * (caller must obtain proper infomask bits using GetMultiXactIdHintBits)
6688 : *
6689 : * Caller delegates control of page freezing to us. In practice we always
6690 : * force freezing of caller's page unless FRM_NOOP processing is indicated.
6691 : * We help caller ensure that XIDs < FreezeLimit and MXIDs < MultiXactCutoff
6692 : * can never be left behind. We freely choose when and how to process each
6693 : * Multi, without ever violating the cutoff postconditions for freezing.
6694 : *
6695 : * It's useful to remove Multis on a proactive timeline (relative to freezing
6696 : * XIDs) to keep MultiXact member SLRU buffer misses to a minimum. It can also
6697 : * be cheaper in the short run, for us, since we too can avoid SLRU buffer
6698 : * misses through eager processing.
6699 : *
6700 : * NB: Creates a _new_ MultiXactId when FRM_RETURN_IS_MULTI is set, though only
6701 : * when FreezeLimit and/or MultiXactCutoff cutoffs leave us with no choice.
6702 : * This can usually be put off, which is usually enough to avoid it altogether.
6703 : * Allocating new multis during VACUUM should be avoided on general principle;
6704 : * only VACUUM can advance relminmxid, so allocating new Multis here comes with
6705 : * its own special risks.
6706 : *
6707 : * NB: Caller must maintain "no freeze" NewRelfrozenXid/NewRelminMxid trackers
6708 : * using heap_tuple_should_freeze when we haven't forced page-level freezing.
6709 : *
6710 : * NB: Caller should avoid needlessly calling heap_tuple_should_freeze when we
6711 : * have already forced page-level freezing, since that might incur the same
6712 : * SLRU buffer misses that we specifically intended to avoid by freezing.
6713 : */
6714 : static TransactionId
6715 12 : FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
6716 : const struct VacuumCutoffs *cutoffs, uint16 *flags,
6717 : HeapPageFreeze *pagefrz)
6718 : {
6719 : TransactionId newxmax;
6720 : MultiXactMember *members;
6721 : int nmembers;
6722 : bool need_replace;
6723 : int nnewmembers;
6724 : MultiXactMember *newmembers;
6725 : bool has_lockers;
6726 : TransactionId update_xid;
6727 : bool update_committed;
6728 : TransactionId FreezePageRelfrozenXid;
6729 :
6730 12 : *flags = 0;
6731 :
6732 : /* We should only be called in Multis */
6733 : Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6734 :
6735 24 : if (!MultiXactIdIsValid(multi) ||
6736 12 : HEAP_LOCKED_UPGRADED(t_infomask))
6737 : {
6738 0 : *flags |= FRM_INVALIDATE_XMAX;
6739 0 : pagefrz->freeze_required = true;
6740 0 : return InvalidTransactionId;
6741 : }
6742 12 : else if (MultiXactIdPrecedes(multi, cutoffs->relminmxid))
6743 0 : ereport(ERROR,
6744 : (errcode(ERRCODE_DATA_CORRUPTED),
6745 : errmsg_internal("found multixact %u from before relminmxid %u",
6746 : multi, cutoffs->relminmxid)));
6747 12 : else if (MultiXactIdPrecedes(multi, cutoffs->OldestMxact))
6748 : {
6749 : TransactionId update_xact;
6750 :
6751 : /*
6752 : * This old multi cannot possibly have members still running, but
6753 : * verify just in case. If it was a locker only, it can be removed
6754 : * without any further consideration; but if it contained an update,
6755 : * we might need to preserve it.
6756 : */
6757 8 : if (MultiXactIdIsRunning(multi,
6758 8 : HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
6759 0 : ereport(ERROR,
6760 : (errcode(ERRCODE_DATA_CORRUPTED),
6761 : errmsg_internal("multixact %u from before multi freeze cutoff %u found to be still running",
6762 : multi, cutoffs->OldestMxact)));
6763 :
6764 8 : if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
6765 : {
6766 8 : *flags |= FRM_INVALIDATE_XMAX;
6767 8 : pagefrz->freeze_required = true;
6768 8 : return InvalidTransactionId;
6769 : }
6770 :
6771 : /* replace multi with single XID for its updater? */
6772 0 : update_xact = MultiXactIdGetUpdateXid(multi, t_infomask);
6773 0 : if (TransactionIdPrecedes(update_xact, cutoffs->relfrozenxid))
6774 0 : ereport(ERROR,
6775 : (errcode(ERRCODE_DATA_CORRUPTED),
6776 : errmsg_internal("multixact %u contains update XID %u from before relfrozenxid %u",
6777 : multi, update_xact,
6778 : cutoffs->relfrozenxid)));
6779 0 : else if (TransactionIdPrecedes(update_xact, cutoffs->OldestXmin))
6780 : {
6781 : /*
6782 : * Updater XID has to have aborted (otherwise the tuple would have
6783 : * been pruned away instead, since updater XID is < OldestXmin).
6784 : * Just remove xmax.
6785 : */
6786 0 : if (TransactionIdDidCommit(update_xact))
6787 0 : ereport(ERROR,
6788 : (errcode(ERRCODE_DATA_CORRUPTED),
6789 : errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
6790 : multi, update_xact,
6791 : cutoffs->OldestXmin)));
6792 0 : *flags |= FRM_INVALIDATE_XMAX;
6793 0 : pagefrz->freeze_required = true;
6794 0 : return InvalidTransactionId;
6795 : }
6796 :
6797 : /* Have to keep updater XID as new xmax */
6798 0 : *flags |= FRM_RETURN_IS_XID;
6799 0 : pagefrz->freeze_required = true;
6800 0 : return update_xact;
6801 : }
6802 :
6803 : /*
6804 : * Some member(s) of this Multi may be below FreezeLimit xid cutoff, so we
6805 : * need to walk the whole members array to figure out what to do, if
6806 : * anything.
6807 : */
6808 : nmembers =
6809 4 : GetMultiXactIdMembers(multi, &members, false,
6810 4 : HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
6811 4 : if (nmembers <= 0)
6812 : {
6813 : /* Nothing worth keeping */
6814 0 : *flags |= FRM_INVALIDATE_XMAX;
6815 0 : pagefrz->freeze_required = true;
6816 0 : return InvalidTransactionId;
6817 : }
6818 :
6819 : /*
6820 : * The FRM_NOOP case is the only case where we might need to ratchet back
6821 : * FreezePageRelfrozenXid or FreezePageRelminMxid. It is also the only
6822 : * case where our caller might ratchet back its NoFreezePageRelfrozenXid
6823 : * or NoFreezePageRelminMxid "no freeze" trackers to deal with a multi.
6824 : * FRM_NOOP handling should result in the NewRelfrozenXid/NewRelminMxid
6825 : * trackers managed by VACUUM being ratcheting back by xmax to the degree
6826 : * required to make it safe to leave xmax undisturbed, independent of
6827 : * whether or not page freezing is triggered somewhere else.
6828 : *
6829 : * Our policy is to force freezing in every case other than FRM_NOOP,
6830 : * which obviates the need to maintain either set of trackers, anywhere.
6831 : * Every other case will reliably execute a freeze plan for xmax that
6832 : * either replaces xmax with an XID/MXID >= OldestXmin/OldestMxact, or
6833 : * sets xmax to an InvalidTransactionId XID, rendering xmax fully frozen.
6834 : * (VACUUM's NewRelfrozenXid/NewRelminMxid trackers are initialized with
6835 : * OldestXmin/OldestMxact, so later values never need to be tracked here.)
6836 : */
6837 4 : need_replace = false;
6838 4 : FreezePageRelfrozenXid = pagefrz->FreezePageRelfrozenXid;
6839 8 : for (int i = 0; i < nmembers; i++)
6840 : {
6841 6 : TransactionId xid = members[i].xid;
6842 :
6843 : Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
6844 :
6845 6 : if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
6846 : {
6847 : /* Can't violate the FreezeLimit postcondition */
6848 2 : need_replace = true;
6849 2 : break;
6850 : }
6851 4 : if (TransactionIdPrecedes(xid, FreezePageRelfrozenXid))
6852 0 : FreezePageRelfrozenXid = xid;
6853 : }
6854 :
6855 : /* Can't violate the MultiXactCutoff postcondition, either */
6856 4 : if (!need_replace)
6857 2 : need_replace = MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff);
6858 :
6859 4 : if (!need_replace)
6860 : {
6861 : /*
6862 : * vacuumlazy.c might ratchet back NewRelminMxid, NewRelfrozenXid, or
6863 : * both together to make it safe to retain this particular multi after
6864 : * freezing its page
6865 : */
6866 2 : *flags |= FRM_NOOP;
6867 2 : pagefrz->FreezePageRelfrozenXid = FreezePageRelfrozenXid;
6868 2 : if (MultiXactIdPrecedes(multi, pagefrz->FreezePageRelminMxid))
6869 0 : pagefrz->FreezePageRelminMxid = multi;
6870 2 : pfree(members);
6871 2 : return multi;
6872 : }
6873 :
6874 : /*
6875 : * Do a more thorough second pass over the multi to figure out which
6876 : * member XIDs actually need to be kept. Checking the precise status of
6877 : * individual members might even show that we don't need to keep anything.
6878 : * That is quite possible even though the Multi must be >= OldestMxact,
6879 : * since our second pass only keeps member XIDs when it's truly necessary;
6880 : * even member XIDs >= OldestXmin often won't be kept by second pass.
6881 : */
6882 2 : nnewmembers = 0;
6883 2 : newmembers = palloc(sizeof(MultiXactMember) * nmembers);
6884 2 : has_lockers = false;
6885 2 : update_xid = InvalidTransactionId;
6886 2 : update_committed = false;
6887 :
6888 : /*
6889 : * Determine whether to keep each member xid, or to ignore it instead
6890 : */
6891 6 : for (int i = 0; i < nmembers; i++)
6892 : {
6893 4 : TransactionId xid = members[i].xid;
6894 4 : MultiXactStatus mstatus = members[i].status;
6895 :
6896 : Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
6897 :
6898 4 : if (!ISUPDATE_from_mxstatus(mstatus))
6899 : {
6900 : /*
6901 : * Locker XID (not updater XID). We only keep lockers that are
6902 : * still running.
6903 : */
6904 8 : if (TransactionIdIsCurrentTransactionId(xid) ||
6905 4 : TransactionIdIsInProgress(xid))
6906 : {
6907 2 : if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
6908 0 : ereport(ERROR,
6909 : (errcode(ERRCODE_DATA_CORRUPTED),
6910 : errmsg_internal("multixact %u contains running locker XID %u from before removable cutoff %u",
6911 : multi, xid,
6912 : cutoffs->OldestXmin)));
6913 2 : newmembers[nnewmembers++] = members[i];
6914 2 : has_lockers = true;
6915 : }
6916 :
6917 4 : continue;
6918 : }
6919 :
6920 : /*
6921 : * Updater XID (not locker XID). Should we keep it?
6922 : *
6923 : * Since the tuple wasn't totally removed when vacuum pruned, the
6924 : * update Xid cannot possibly be older than OldestXmin cutoff unless
6925 : * the updater XID aborted. If the updater transaction is known
6926 : * aborted or crashed then it's okay to ignore it, otherwise not.
6927 : *
6928 : * In any case the Multi should never contain two updaters, whatever
6929 : * their individual commit status. Check for that first, in passing.
6930 : */
6931 0 : if (TransactionIdIsValid(update_xid))
6932 0 : ereport(ERROR,
6933 : (errcode(ERRCODE_DATA_CORRUPTED),
6934 : errmsg_internal("multixact %u has two or more updating members",
6935 : multi),
6936 : errdetail_internal("First updater XID=%u second updater XID=%u.",
6937 : update_xid, xid)));
6938 :
6939 : /*
6940 : * As with all tuple visibility routines, it's critical to test
6941 : * TransactionIdIsInProgress before TransactionIdDidCommit, because of
6942 : * race conditions explained in detail in heapam_visibility.c.
6943 : */
6944 0 : if (TransactionIdIsCurrentTransactionId(xid) ||
6945 0 : TransactionIdIsInProgress(xid))
6946 0 : update_xid = xid;
6947 0 : else if (TransactionIdDidCommit(xid))
6948 : {
6949 : /*
6950 : * The transaction committed, so we can tell caller to set
6951 : * HEAP_XMAX_COMMITTED. (We can only do this because we know the
6952 : * transaction is not running.)
6953 : */
6954 0 : update_committed = true;
6955 0 : update_xid = xid;
6956 : }
6957 : else
6958 : {
6959 : /*
6960 : * Not in progress, not committed -- must be aborted or crashed;
6961 : * we can ignore it.
6962 : */
6963 0 : continue;
6964 : }
6965 :
6966 : /*
6967 : * We determined that updater must be kept -- add it to pending new
6968 : * members list
6969 : */
6970 0 : if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
6971 0 : ereport(ERROR,
6972 : (errcode(ERRCODE_DATA_CORRUPTED),
6973 : errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
6974 : multi, xid, cutoffs->OldestXmin)));
6975 0 : newmembers[nnewmembers++] = members[i];
6976 : }
6977 :
6978 2 : pfree(members);
6979 :
6980 : /*
6981 : * Determine what to do with caller's multi based on information gathered
6982 : * during our second pass
6983 : */
6984 2 : if (nnewmembers == 0)
6985 : {
6986 : /* Nothing worth keeping */
6987 0 : *flags |= FRM_INVALIDATE_XMAX;
6988 0 : newxmax = InvalidTransactionId;
6989 : }
6990 2 : else if (TransactionIdIsValid(update_xid) && !has_lockers)
6991 : {
6992 : /*
6993 : * If there's a single member and it's an update, pass it back alone
6994 : * without creating a new Multi. (XXX we could do this when there's a
6995 : * single remaining locker, too, but that would complicate the API too
6996 : * much; moreover, the case with the single updater is more
6997 : * interesting, because those are longer-lived.)
6998 : */
6999 : Assert(nnewmembers == 1);
7000 0 : *flags |= FRM_RETURN_IS_XID;
7001 0 : if (update_committed)
7002 0 : *flags |= FRM_MARK_COMMITTED;
7003 0 : newxmax = update_xid;
7004 : }
7005 : else
7006 : {
7007 : /*
7008 : * Create a new multixact with the surviving members of the previous
7009 : * one, to set as new Xmax in the tuple
7010 : */
7011 2 : newxmax = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
7012 2 : *flags |= FRM_RETURN_IS_MULTI;
7013 : }
7014 :
7015 2 : pfree(newmembers);
7016 :
7017 2 : pagefrz->freeze_required = true;
7018 2 : return newxmax;
7019 : }
7020 :
7021 : /*
7022 : * heap_prepare_freeze_tuple
7023 : *
7024 : * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
7025 : * are older than the OldestXmin and/or OldestMxact freeze cutoffs. If so,
7026 : * setup enough state (in the *frz output argument) to enable caller to
7027 : * process this tuple as part of freezing its page, and return true. Return
7028 : * false if nothing can be changed about the tuple right now.
7029 : *
7030 : * Also sets *totally_frozen to true if the tuple will be totally frozen once
7031 : * caller executes returned freeze plan (or if the tuple was already totally
7032 : * frozen by an earlier VACUUM). This indicates that there are no remaining
7033 : * XIDs or MultiXactIds that will need to be processed by a future VACUUM.
7034 : *
7035 : * VACUUM caller must assemble HeapTupleFreeze freeze plan entries for every
7036 : * tuple that we returned true for, and then execute freezing. Caller must
7037 : * initialize pagefrz fields for page as a whole before first call here for
7038 : * each heap page.
7039 : *
7040 : * VACUUM caller decides on whether or not to freeze the page as a whole.
7041 : * We'll often prepare freeze plans for a page that caller just discards.
7042 : * However, VACUUM doesn't always get to make a choice; it must freeze when
7043 : * pagefrz.freeze_required is set, to ensure that any XIDs < FreezeLimit (and
7044 : * MXIDs < MultiXactCutoff) can never be left behind. We help to make sure
7045 : * that VACUUM always follows that rule.
7046 : *
7047 : * We sometimes force freezing of xmax MultiXactId values long before it is
7048 : * strictly necessary to do so just to ensure the FreezeLimit postcondition.
7049 : * It's worth processing MultiXactIds proactively when it is cheap to do so,
7050 : * and it's convenient to make that happen by piggy-backing it on the "force
7051 : * freezing" mechanism. Conversely, we sometimes delay freezing MultiXactIds
7052 : * because it is expensive right now (though only when it's still possible to
7053 : * do so without violating the FreezeLimit/MultiXactCutoff postcondition).
7054 : *
7055 : * It is assumed that the caller has checked the tuple with
7056 : * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
7057 : * (else we should be removing the tuple, not freezing it).
7058 : *
7059 : * NB: This function has side effects: it might allocate a new MultiXactId.
7060 : * It will be set as tuple's new xmax when our *frz output is processed within
7061 : * heap_execute_freeze_tuple later on. If the tuple is in a shared buffer
7062 : * then caller had better have an exclusive lock on it already.
7063 : */
7064 : bool
7065 50662318 : heap_prepare_freeze_tuple(HeapTupleHeader tuple,
7066 : const struct VacuumCutoffs *cutoffs,
7067 : HeapPageFreeze *pagefrz,
7068 : HeapTupleFreeze *frz, bool *totally_frozen)
7069 : {
7070 50662318 : bool xmin_already_frozen = false,
7071 50662318 : xmax_already_frozen = false;
7072 50662318 : bool freeze_xmin = false,
7073 50662318 : replace_xvac = false,
7074 50662318 : replace_xmax = false,
7075 50662318 : freeze_xmax = false;
7076 : TransactionId xid;
7077 :
7078 50662318 : frz->xmax = HeapTupleHeaderGetRawXmax(tuple);
7079 50662318 : frz->t_infomask2 = tuple->t_infomask2;
7080 50662318 : frz->t_infomask = tuple->t_infomask;
7081 50662318 : frz->frzflags = 0;
7082 50662318 : frz->checkflags = 0;
7083 :
7084 : /*
7085 : * Process xmin, while keeping track of whether it's already frozen, or
7086 : * will become frozen iff our freeze plan is executed by caller (could be
7087 : * neither).
7088 : */
7089 50662318 : xid = HeapTupleHeaderGetXmin(tuple);
7090 50662318 : if (!TransactionIdIsNormal(xid))
7091 44762734 : xmin_already_frozen = true;
7092 : else
7093 : {
7094 5899584 : if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
7095 0 : ereport(ERROR,
7096 : (errcode(ERRCODE_DATA_CORRUPTED),
7097 : errmsg_internal("found xmin %u from before relfrozenxid %u",
7098 : xid, cutoffs->relfrozenxid)));
7099 :
7100 : /* Will set freeze_xmin flags in freeze plan below */
7101 5899584 : freeze_xmin = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
7102 :
7103 : /* Verify that xmin committed if and when freeze plan is executed */
7104 5899584 : if (freeze_xmin)
7105 4503086 : frz->checkflags |= HEAP_FREEZE_CHECK_XMIN_COMMITTED;
7106 : }
7107 :
7108 : /*
7109 : * Old-style VACUUM FULL is gone, but we have to process xvac for as long
7110 : * as we support having MOVED_OFF/MOVED_IN tuples in the database
7111 : */
7112 50662318 : xid = HeapTupleHeaderGetXvac(tuple);
7113 50662318 : if (TransactionIdIsNormal(xid))
7114 : {
7115 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7116 : Assert(TransactionIdPrecedes(xid, cutoffs->OldestXmin));
7117 :
7118 : /*
7119 : * For Xvac, we always freeze proactively. This allows totally_frozen
7120 : * tracking to ignore xvac.
7121 : */
7122 0 : replace_xvac = pagefrz->freeze_required = true;
7123 :
7124 : /* Will set replace_xvac flags in freeze plan below */
7125 : }
7126 :
7127 : /* Now process xmax */
7128 50662318 : xid = frz->xmax;
7129 50662318 : if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7130 : {
7131 : /* Raw xmax is a MultiXactId */
7132 : TransactionId newxmax;
7133 : uint16 flags;
7134 :
7135 : /*
7136 : * We will either remove xmax completely (in the "freeze_xmax" path),
7137 : * process xmax by replacing it (in the "replace_xmax" path), or
7138 : * perform no-op xmax processing. The only constraint is that the
7139 : * FreezeLimit/MultiXactCutoff postcondition must never be violated.
7140 : */
7141 12 : newxmax = FreezeMultiXactId(xid, tuple->t_infomask, cutoffs,
7142 : &flags, pagefrz);
7143 :
7144 12 : if (flags & FRM_NOOP)
7145 : {
7146 : /*
7147 : * xmax is a MultiXactId, and nothing about it changes for now.
7148 : * This is the only case where 'freeze_required' won't have been
7149 : * set for us by FreezeMultiXactId, as well as the only case where
7150 : * neither freeze_xmax nor replace_xmax are set (given a multi).
7151 : *
7152 : * This is a no-op, but the call to FreezeMultiXactId might have
7153 : * ratcheted back NewRelfrozenXid and/or NewRelminMxid trackers
7154 : * for us (the "freeze page" variants, specifically). That'll
7155 : * make it safe for our caller to freeze the page later on, while
7156 : * leaving this particular xmax undisturbed.
7157 : *
7158 : * FreezeMultiXactId is _not_ responsible for the "no freeze"
7159 : * NewRelfrozenXid/NewRelminMxid trackers, though -- that's our
7160 : * job. A call to heap_tuple_should_freeze for this same tuple
7161 : * will take place below if 'freeze_required' isn't set already.
7162 : * (This repeats work from FreezeMultiXactId, but allows "no
7163 : * freeze" tracker maintenance to happen in only one place.)
7164 : */
7165 : Assert(!MultiXactIdPrecedes(newxmax, cutoffs->MultiXactCutoff));
7166 : Assert(MultiXactIdIsValid(newxmax) && xid == newxmax);
7167 : }
7168 10 : else if (flags & FRM_RETURN_IS_XID)
7169 : {
7170 : /*
7171 : * xmax will become an updater Xid (original MultiXact's updater
7172 : * member Xid will be carried forward as a simple Xid in Xmax).
7173 : */
7174 : Assert(!TransactionIdPrecedes(newxmax, cutoffs->OldestXmin));
7175 :
7176 : /*
7177 : * NB -- some of these transformations are only valid because we
7178 : * know the return Xid is a tuple updater (i.e. not merely a
7179 : * locker.) Also note that the only reason we don't explicitly
7180 : * worry about HEAP_KEYS_UPDATED is because it lives in
7181 : * t_infomask2 rather than t_infomask.
7182 : */
7183 0 : frz->t_infomask &= ~HEAP_XMAX_BITS;
7184 0 : frz->xmax = newxmax;
7185 0 : if (flags & FRM_MARK_COMMITTED)
7186 0 : frz->t_infomask |= HEAP_XMAX_COMMITTED;
7187 0 : replace_xmax = true;
7188 : }
7189 10 : else if (flags & FRM_RETURN_IS_MULTI)
7190 : {
7191 : uint16 newbits;
7192 : uint16 newbits2;
7193 :
7194 : /*
7195 : * xmax is an old MultiXactId that we have to replace with a new
7196 : * MultiXactId, to carry forward two or more original member XIDs.
7197 : */
7198 : Assert(!MultiXactIdPrecedes(newxmax, cutoffs->OldestMxact));
7199 :
7200 : /*
7201 : * We can't use GetMultiXactIdHintBits directly on the new multi
7202 : * here; that routine initializes the masks to all zeroes, which
7203 : * would lose other bits we need. Doing it this way ensures all
7204 : * unrelated bits remain untouched.
7205 : */
7206 2 : frz->t_infomask &= ~HEAP_XMAX_BITS;
7207 2 : frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7208 2 : GetMultiXactIdHintBits(newxmax, &newbits, &newbits2);
7209 2 : frz->t_infomask |= newbits;
7210 2 : frz->t_infomask2 |= newbits2;
7211 2 : frz->xmax = newxmax;
7212 2 : replace_xmax = true;
7213 : }
7214 : else
7215 : {
7216 : /*
7217 : * Freeze plan for tuple "freezes xmax" in the strictest sense:
7218 : * it'll leave nothing in xmax (neither an Xid nor a MultiXactId).
7219 : */
7220 : Assert(flags & FRM_INVALIDATE_XMAX);
7221 : Assert(!TransactionIdIsValid(newxmax));
7222 :
7223 : /* Will set freeze_xmax flags in freeze plan below */
7224 8 : freeze_xmax = true;
7225 : }
7226 :
7227 : /* MultiXactId processing forces freezing (barring FRM_NOOP case) */
7228 : Assert(pagefrz->freeze_required || (!freeze_xmax && !replace_xmax));
7229 : }
7230 50662306 : else if (TransactionIdIsNormal(xid))
7231 : {
7232 : /* Raw xmax is normal XID */
7233 18568132 : if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
7234 0 : ereport(ERROR,
7235 : (errcode(ERRCODE_DATA_CORRUPTED),
7236 : errmsg_internal("found xmax %u from before relfrozenxid %u",
7237 : xid, cutoffs->relfrozenxid)));
7238 :
7239 : /* Will set freeze_xmax flags in freeze plan below */
7240 18568132 : freeze_xmax = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
7241 :
7242 : /*
7243 : * Verify that xmax aborted if and when freeze plan is executed,
7244 : * provided it's from an update. (A lock-only xmax can be removed
7245 : * independent of this, since the lock is released at xact end.)
7246 : */
7247 18568132 : if (freeze_xmax && !HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
7248 1778 : frz->checkflags |= HEAP_FREEZE_CHECK_XMAX_ABORTED;
7249 : }
7250 32094174 : else if (!TransactionIdIsValid(xid))
7251 : {
7252 : /* Raw xmax is InvalidTransactionId XID */
7253 : Assert((tuple->t_infomask & HEAP_XMAX_IS_MULTI) == 0);
7254 32094174 : xmax_already_frozen = true;
7255 : }
7256 : else
7257 0 : ereport(ERROR,
7258 : (errcode(ERRCODE_DATA_CORRUPTED),
7259 : errmsg_internal("found raw xmax %u (infomask 0x%04x) not invalid and not multi",
7260 : xid, tuple->t_infomask)));
7261 :
7262 50662318 : if (freeze_xmin)
7263 : {
7264 : Assert(!xmin_already_frozen);
7265 :
7266 4503086 : frz->t_infomask |= HEAP_XMIN_FROZEN;
7267 : }
7268 50662318 : if (replace_xvac)
7269 : {
7270 : /*
7271 : * If a MOVED_OFF tuple is not dead, the xvac transaction must have
7272 : * failed; whereas a non-dead MOVED_IN tuple must mean the xvac
7273 : * transaction succeeded.
7274 : */
7275 : Assert(pagefrz->freeze_required);
7276 0 : if (tuple->t_infomask & HEAP_MOVED_OFF)
7277 0 : frz->frzflags |= XLH_INVALID_XVAC;
7278 : else
7279 0 : frz->frzflags |= XLH_FREEZE_XVAC;
7280 : }
7281 : if (replace_xmax)
7282 : {
7283 : Assert(!xmax_already_frozen && !freeze_xmax);
7284 : Assert(pagefrz->freeze_required);
7285 :
7286 : /* Already set replace_xmax flags in freeze plan earlier */
7287 : }
7288 50662318 : if (freeze_xmax)
7289 : {
7290 : Assert(!xmax_already_frozen && !replace_xmax);
7291 :
7292 3858 : frz->xmax = InvalidTransactionId;
7293 :
7294 : /*
7295 : * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
7296 : * LOCKED. Normalize to INVALID just to be sure no one gets confused.
7297 : * Also get rid of the HEAP_KEYS_UPDATED bit.
7298 : */
7299 3858 : frz->t_infomask &= ~HEAP_XMAX_BITS;
7300 3858 : frz->t_infomask |= HEAP_XMAX_INVALID;
7301 3858 : frz->t_infomask2 &= ~HEAP_HOT_UPDATED;
7302 3858 : frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7303 : }
7304 :
7305 : /*
7306 : * Determine if this tuple is already totally frozen, or will become
7307 : * totally frozen (provided caller executes freeze plans for the page)
7308 : */
7309 99924280 : *totally_frozen = ((freeze_xmin || xmin_already_frozen) &&
7310 49261962 : (freeze_xmax || xmax_already_frozen));
7311 :
7312 50662318 : if (!pagefrz->freeze_required && !(xmin_already_frozen &&
7313 : xmax_already_frozen))
7314 : {
7315 : /*
7316 : * So far no previous tuple from the page made freezing mandatory.
7317 : * Does this tuple force caller to freeze the entire page?
7318 : */
7319 21412364 : pagefrz->freeze_required =
7320 21412364 : heap_tuple_should_freeze(tuple, cutoffs,
7321 : &pagefrz->NoFreezePageRelfrozenXid,
7322 : &pagefrz->NoFreezePageRelminMxid);
7323 : }
7324 :
7325 : /* Tell caller if this tuple has a usable freeze plan set in *frz */
7326 50662318 : return freeze_xmin || replace_xvac || replace_xmax || freeze_xmax;
7327 : }
7328 :
7329 : /*
7330 : * Perform xmin/xmax XID status sanity checks before actually executing freeze
7331 : * plans.
7332 : *
7333 : * heap_prepare_freeze_tuple doesn't perform these checks directly because
7334 : * pg_xact lookups are relatively expensive. They shouldn't be repeated by
7335 : * successive VACUUMs that each decide against freezing the same page.
7336 : */
7337 : void
7338 44764 : heap_pre_freeze_checks(Buffer buffer,
7339 : HeapTupleFreeze *tuples, int ntuples)
7340 : {
7341 44764 : Page page = BufferGetPage(buffer);
7342 :
7343 1926456 : for (int i = 0; i < ntuples; i++)
7344 : {
7345 1881692 : HeapTupleFreeze *frz = tuples + i;
7346 1881692 : ItemId itemid = PageGetItemId(page, frz->offset);
7347 : HeapTupleHeader htup;
7348 :
7349 1881692 : htup = (HeapTupleHeader) PageGetItem(page, itemid);
7350 :
7351 : /* Deliberately avoid relying on tuple hint bits here */
7352 1881692 : if (frz->checkflags & HEAP_FREEZE_CHECK_XMIN_COMMITTED)
7353 : {
7354 1881690 : TransactionId xmin = HeapTupleHeaderGetRawXmin(htup);
7355 :
7356 : Assert(!HeapTupleHeaderXminFrozen(htup));
7357 1881690 : if (unlikely(!TransactionIdDidCommit(xmin)))
7358 0 : ereport(ERROR,
7359 : (errcode(ERRCODE_DATA_CORRUPTED),
7360 : errmsg_internal("uncommitted xmin %u needs to be frozen",
7361 : xmin)));
7362 : }
7363 :
7364 : /*
7365 : * TransactionIdDidAbort won't work reliably in the presence of XIDs
7366 : * left behind by transactions that were in progress during a crash,
7367 : * so we can only check that xmax didn't commit
7368 : */
7369 1881692 : if (frz->checkflags & HEAP_FREEZE_CHECK_XMAX_ABORTED)
7370 : {
7371 632 : TransactionId xmax = HeapTupleHeaderGetRawXmax(htup);
7372 :
7373 : Assert(TransactionIdIsNormal(xmax));
7374 632 : if (unlikely(TransactionIdDidCommit(xmax)))
7375 0 : ereport(ERROR,
7376 : (errcode(ERRCODE_DATA_CORRUPTED),
7377 : errmsg_internal("cannot freeze committed xmax %u",
7378 : xmax)));
7379 : }
7380 : }
7381 44764 : }
7382 :
7383 : /*
7384 : * Helper which executes freezing of one or more heap tuples on a page on
7385 : * behalf of caller. Caller passes an array of tuple plans from
7386 : * heap_prepare_freeze_tuple. Caller must set 'offset' in each plan for us.
7387 : * Must be called in a critical section that also marks the buffer dirty and,
7388 : * if needed, emits WAL.
7389 : */
7390 : void
7391 44764 : heap_freeze_prepared_tuples(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
7392 : {
7393 44764 : Page page = BufferGetPage(buffer);
7394 :
7395 1926456 : for (int i = 0; i < ntuples; i++)
7396 : {
7397 1881692 : HeapTupleFreeze *frz = tuples + i;
7398 1881692 : ItemId itemid = PageGetItemId(page, frz->offset);
7399 : HeapTupleHeader htup;
7400 :
7401 1881692 : htup = (HeapTupleHeader) PageGetItem(page, itemid);
7402 1881692 : heap_execute_freeze_tuple(htup, frz);
7403 : }
7404 44764 : }
7405 :
7406 : /*
7407 : * heap_freeze_tuple
7408 : * Freeze tuple in place, without WAL logging.
7409 : *
7410 : * Useful for callers like CLUSTER that perform their own WAL logging.
7411 : */
7412 : bool
7413 715704 : heap_freeze_tuple(HeapTupleHeader tuple,
7414 : TransactionId relfrozenxid, TransactionId relminmxid,
7415 : TransactionId FreezeLimit, TransactionId MultiXactCutoff)
7416 : {
7417 : HeapTupleFreeze frz;
7418 : bool do_freeze;
7419 : bool totally_frozen;
7420 : struct VacuumCutoffs cutoffs;
7421 : HeapPageFreeze pagefrz;
7422 :
7423 715704 : cutoffs.relfrozenxid = relfrozenxid;
7424 715704 : cutoffs.relminmxid = relminmxid;
7425 715704 : cutoffs.OldestXmin = FreezeLimit;
7426 715704 : cutoffs.OldestMxact = MultiXactCutoff;
7427 715704 : cutoffs.FreezeLimit = FreezeLimit;
7428 715704 : cutoffs.MultiXactCutoff = MultiXactCutoff;
7429 :
7430 715704 : pagefrz.freeze_required = true;
7431 715704 : pagefrz.FreezePageRelfrozenXid = FreezeLimit;
7432 715704 : pagefrz.FreezePageRelminMxid = MultiXactCutoff;
7433 715704 : pagefrz.NoFreezePageRelfrozenXid = FreezeLimit;
7434 715704 : pagefrz.NoFreezePageRelminMxid = MultiXactCutoff;
7435 :
7436 715704 : do_freeze = heap_prepare_freeze_tuple(tuple, &cutoffs,
7437 : &pagefrz, &frz, &totally_frozen);
7438 :
7439 : /*
7440 : * Note that because this is not a WAL-logged operation, we don't need to
7441 : * fill in the offset in the freeze record.
7442 : */
7443 :
7444 715704 : if (do_freeze)
7445 509692 : heap_execute_freeze_tuple(tuple, &frz);
7446 715704 : return do_freeze;
7447 : }
7448 :
7449 : /*
7450 : * For a given MultiXactId, return the hint bits that should be set in the
7451 : * tuple's infomask.
7452 : *
7453 : * Normally this should be called for a multixact that was just created, and
7454 : * so is on our local cache, so the GetMembers call is fast.
7455 : */
7456 : static void
7457 2354 : GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
7458 : uint16 *new_infomask2)
7459 : {
7460 : int nmembers;
7461 : MultiXactMember *members;
7462 : int i;
7463 2354 : uint16 bits = HEAP_XMAX_IS_MULTI;
7464 2354 : uint16 bits2 = 0;
7465 2354 : bool has_update = false;
7466 2354 : LockTupleMode strongest = LockTupleKeyShare;
7467 :
7468 : /*
7469 : * We only use this in multis we just created, so they cannot be values
7470 : * pre-pg_upgrade.
7471 : */
7472 2354 : nmembers = GetMultiXactIdMembers(multi, &members, false, false);
7473 :
7474 7188 : for (i = 0; i < nmembers; i++)
7475 : {
7476 : LockTupleMode mode;
7477 :
7478 : /*
7479 : * Remember the strongest lock mode held by any member of the
7480 : * multixact.
7481 : */
7482 4834 : mode = TUPLOCK_from_mxstatus(members[i].status);
7483 4834 : if (mode > strongest)
7484 1322 : strongest = mode;
7485 :
7486 : /* See what other bits we need */
7487 4834 : switch (members[i].status)
7488 : {
7489 4452 : case MultiXactStatusForKeyShare:
7490 : case MultiXactStatusForShare:
7491 : case MultiXactStatusForNoKeyUpdate:
7492 4452 : break;
7493 :
7494 104 : case MultiXactStatusForUpdate:
7495 104 : bits2 |= HEAP_KEYS_UPDATED;
7496 104 : break;
7497 :
7498 258 : case MultiXactStatusNoKeyUpdate:
7499 258 : has_update = true;
7500 258 : break;
7501 :
7502 20 : case MultiXactStatusUpdate:
7503 20 : bits2 |= HEAP_KEYS_UPDATED;
7504 20 : has_update = true;
7505 20 : break;
7506 : }
7507 : }
7508 :
7509 2354 : if (strongest == LockTupleExclusive ||
7510 : strongest == LockTupleNoKeyExclusive)
7511 438 : bits |= HEAP_XMAX_EXCL_LOCK;
7512 1916 : else if (strongest == LockTupleShare)
7513 878 : bits |= HEAP_XMAX_SHR_LOCK;
7514 1038 : else if (strongest == LockTupleKeyShare)
7515 1038 : bits |= HEAP_XMAX_KEYSHR_LOCK;
7516 :
7517 2354 : if (!has_update)
7518 2076 : bits |= HEAP_XMAX_LOCK_ONLY;
7519 :
7520 2354 : if (nmembers > 0)
7521 2354 : pfree(members);
7522 :
7523 2354 : *new_infomask = bits;
7524 2354 : *new_infomask2 = bits2;
7525 2354 : }
7526 :
7527 : /*
7528 : * MultiXactIdGetUpdateXid
7529 : *
7530 : * Given a multixact Xmax and corresponding infomask, which does not have the
7531 : * HEAP_XMAX_LOCK_ONLY bit set, obtain and return the Xid of the updating
7532 : * transaction.
7533 : *
7534 : * Caller is expected to check the status of the updating transaction, if
7535 : * necessary.
7536 : */
7537 : static TransactionId
7538 980 : MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
7539 : {
7540 980 : TransactionId update_xact = InvalidTransactionId;
7541 : MultiXactMember *members;
7542 : int nmembers;
7543 :
7544 : Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
7545 : Assert(t_infomask & HEAP_XMAX_IS_MULTI);
7546 :
7547 : /*
7548 : * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
7549 : * pre-pg_upgrade.
7550 : */
7551 980 : nmembers = GetMultiXactIdMembers(xmax, &members, false, false);
7552 :
7553 980 : if (nmembers > 0)
7554 : {
7555 : int i;
7556 :
7557 2548 : for (i = 0; i < nmembers; i++)
7558 : {
7559 : /* Ignore lockers */
7560 2548 : if (!ISUPDATE_from_mxstatus(members[i].status))
7561 1568 : continue;
7562 :
7563 : /* there can be at most one updater */
7564 : Assert(update_xact == InvalidTransactionId);
7565 980 : update_xact = members[i].xid;
7566 : #ifndef USE_ASSERT_CHECKING
7567 :
7568 : /*
7569 : * in an assert-enabled build, walk the whole array to ensure
7570 : * there's no other updater.
7571 : */
7572 980 : break;
7573 : #endif
7574 : }
7575 :
7576 980 : pfree(members);
7577 : }
7578 :
7579 980 : return update_xact;
7580 : }
7581 :
7582 : /*
7583 : * HeapTupleGetUpdateXid
7584 : * As above, but use a HeapTupleHeader
7585 : *
7586 : * See also HeapTupleHeaderGetUpdateXid, which can be used without previously
7587 : * checking the hint bits.
7588 : */
7589 : TransactionId
7590 964 : HeapTupleGetUpdateXid(const HeapTupleHeaderData *tup)
7591 : {
7592 964 : return MultiXactIdGetUpdateXid(HeapTupleHeaderGetRawXmax(tup),
7593 964 : tup->t_infomask);
7594 : }
7595 :
7596 : /*
7597 : * Does the given multixact conflict with the current transaction grabbing a
7598 : * tuple lock of the given strength?
7599 : *
7600 : * The passed infomask pairs up with the given multixact in the tuple header.
7601 : *
7602 : * If current_is_member is not NULL, it is set to 'true' if the current
7603 : * transaction is a member of the given multixact.
7604 : */
7605 : static bool
7606 198 : DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
7607 : LockTupleMode lockmode, bool *current_is_member)
7608 : {
7609 : int nmembers;
7610 : MultiXactMember *members;
7611 198 : bool result = false;
7612 198 : LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
7613 :
7614 198 : if (HEAP_LOCKED_UPGRADED(infomask))
7615 0 : return false;
7616 :
7617 198 : nmembers = GetMultiXactIdMembers(multi, &members, false,
7618 198 : HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7619 198 : if (nmembers >= 0)
7620 : {
7621 : int i;
7622 :
7623 618 : for (i = 0; i < nmembers; i++)
7624 : {
7625 : TransactionId memxid;
7626 : LOCKMODE memlockmode;
7627 :
7628 434 : if (result && (current_is_member == NULL || *current_is_member))
7629 : break;
7630 :
7631 420 : memlockmode = LOCKMODE_from_mxstatus(members[i].status);
7632 :
7633 : /* ignore members from current xact (but track their presence) */
7634 420 : memxid = members[i].xid;
7635 420 : if (TransactionIdIsCurrentTransactionId(memxid))
7636 : {
7637 184 : if (current_is_member != NULL)
7638 156 : *current_is_member = true;
7639 184 : continue;
7640 : }
7641 236 : else if (result)
7642 16 : continue;
7643 :
7644 : /* ignore members that don't conflict with the lock we want */
7645 220 : if (!DoLockModesConflict(memlockmode, wanted))
7646 142 : continue;
7647 :
7648 78 : if (ISUPDATE_from_mxstatus(members[i].status))
7649 : {
7650 : /* ignore aborted updaters */
7651 34 : if (TransactionIdDidAbort(memxid))
7652 2 : continue;
7653 : }
7654 : else
7655 : {
7656 : /* ignore lockers-only that are no longer in progress */
7657 44 : if (!TransactionIdIsInProgress(memxid))
7658 14 : continue;
7659 : }
7660 :
7661 : /*
7662 : * Whatever remains are either live lockers that conflict with our
7663 : * wanted lock, and updaters that are not aborted. Those conflict
7664 : * with what we want. Set up to return true, but keep going to
7665 : * look for the current transaction among the multixact members,
7666 : * if needed.
7667 : */
7668 62 : result = true;
7669 : }
7670 198 : pfree(members);
7671 : }
7672 :
7673 198 : return result;
7674 : }
7675 :
7676 : /*
7677 : * Do_MultiXactIdWait
7678 : * Actual implementation for the two functions below.
7679 : *
7680 : * 'multi', 'status' and 'infomask' indicate what to sleep on (the status is
7681 : * needed to ensure we only sleep on conflicting members, and the infomask is
7682 : * used to optimize multixact access in case it's a lock-only multi); 'nowait'
7683 : * indicates whether to use conditional lock acquisition, to allow callers to
7684 : * fail if lock is unavailable. 'rel', 'ctid' and 'oper' are used to set up
7685 : * context information for error messages. 'remaining', if not NULL, receives
7686 : * the number of members that are still running, including any (non-aborted)
7687 : * subtransactions of our own transaction. 'logLockFailure' indicates whether
7688 : * to log details when a lock acquisition fails with 'nowait' enabled.
7689 : *
7690 : * We do this by sleeping on each member using XactLockTableWait. Any
7691 : * members that belong to the current backend are *not* waited for, however;
7692 : * this would not merely be useless but would lead to Assert failure inside
7693 : * XactLockTableWait. By the time this returns, it is certain that all
7694 : * transactions *of other backends* that were members of the MultiXactId
7695 : * that conflict with the requested status are dead (and no new ones can have
7696 : * been added, since it is not legal to add members to an existing
7697 : * MultiXactId).
7698 : *
7699 : * But by the time we finish sleeping, someone else may have changed the Xmax
7700 : * of the containing tuple, so the caller needs to iterate on us somehow.
7701 : *
7702 : * Note that in case we return false, the number of remaining members is
7703 : * not to be trusted.
7704 : */
7705 : static bool
7706 116 : Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
7707 : uint16 infomask, bool nowait,
7708 : Relation rel, ItemPointer ctid, XLTW_Oper oper,
7709 : int *remaining, bool logLockFailure)
7710 : {
7711 116 : bool result = true;
7712 : MultiXactMember *members;
7713 : int nmembers;
7714 116 : int remain = 0;
7715 :
7716 : /* for pre-pg_upgrade tuples, no need to sleep at all */
7717 116 : nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
7718 116 : GetMultiXactIdMembers(multi, &members, false,
7719 116 : HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7720 :
7721 116 : if (nmembers >= 0)
7722 : {
7723 : int i;
7724 :
7725 374 : for (i = 0; i < nmembers; i++)
7726 : {
7727 266 : TransactionId memxid = members[i].xid;
7728 266 : MultiXactStatus memstatus = members[i].status;
7729 :
7730 266 : if (TransactionIdIsCurrentTransactionId(memxid))
7731 : {
7732 48 : remain++;
7733 48 : continue;
7734 : }
7735 :
7736 218 : if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
7737 218 : LOCKMODE_from_mxstatus(status)))
7738 : {
7739 44 : if (remaining && TransactionIdIsInProgress(memxid))
7740 16 : remain++;
7741 44 : continue;
7742 : }
7743 :
7744 : /*
7745 : * This member conflicts with our multi, so we have to sleep (or
7746 : * return failure, if asked to avoid waiting.)
7747 : *
7748 : * Note that we don't set up an error context callback ourselves,
7749 : * but instead we pass the info down to XactLockTableWait. This
7750 : * might seem a bit wasteful because the context is set up and
7751 : * tore down for each member of the multixact, but in reality it
7752 : * should be barely noticeable, and it avoids duplicate code.
7753 : */
7754 174 : if (nowait)
7755 : {
7756 8 : result = ConditionalXactLockTableWait(memxid, logLockFailure);
7757 8 : if (!result)
7758 8 : break;
7759 : }
7760 : else
7761 166 : XactLockTableWait(memxid, rel, ctid, oper);
7762 : }
7763 :
7764 116 : pfree(members);
7765 : }
7766 :
7767 116 : if (remaining)
7768 20 : *remaining = remain;
7769 :
7770 116 : return result;
7771 : }
7772 :
7773 : /*
7774 : * MultiXactIdWait
7775 : * Sleep on a MultiXactId.
7776 : *
7777 : * By the time we finish sleeping, someone else may have changed the Xmax
7778 : * of the containing tuple, so the caller needs to iterate on us somehow.
7779 : *
7780 : * We return (in *remaining, if not NULL) the number of members that are still
7781 : * running, including any (non-aborted) subtransactions of our own transaction.
7782 : */
7783 : static void
7784 108 : MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
7785 : Relation rel, ItemPointer ctid, XLTW_Oper oper,
7786 : int *remaining)
7787 : {
7788 108 : (void) Do_MultiXactIdWait(multi, status, infomask, false,
7789 : rel, ctid, oper, remaining, false);
7790 108 : }
7791 :
7792 : /*
7793 : * ConditionalMultiXactIdWait
7794 : * As above, but only lock if we can get the lock without blocking.
7795 : *
7796 : * By the time we finish sleeping, someone else may have changed the Xmax
7797 : * of the containing tuple, so the caller needs to iterate on us somehow.
7798 : *
7799 : * If the multixact is now all gone, return true. Returns false if some
7800 : * transactions might still be running.
7801 : *
7802 : * We return (in *remaining, if not NULL) the number of members that are still
7803 : * running, including any (non-aborted) subtransactions of our own transaction.
7804 : */
7805 : static bool
7806 8 : ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
7807 : uint16 infomask, Relation rel, int *remaining,
7808 : bool logLockFailure)
7809 : {
7810 8 : return Do_MultiXactIdWait(multi, status, infomask, true,
7811 : rel, NULL, XLTW_None, remaining, logLockFailure);
7812 : }
7813 :
7814 : /*
7815 : * heap_tuple_needs_eventual_freeze
7816 : *
7817 : * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
7818 : * will eventually require freezing (if tuple isn't removed by pruning first).
7819 : */
7820 : bool
7821 274502 : heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
7822 : {
7823 : TransactionId xid;
7824 :
7825 : /*
7826 : * If xmin is a normal transaction ID, this tuple is definitely not
7827 : * frozen.
7828 : */
7829 274502 : xid = HeapTupleHeaderGetXmin(tuple);
7830 274502 : if (TransactionIdIsNormal(xid))
7831 4940 : return true;
7832 :
7833 : /*
7834 : * If xmax is a valid xact or multixact, this tuple is also not frozen.
7835 : */
7836 269562 : if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7837 : {
7838 : MultiXactId multi;
7839 :
7840 0 : multi = HeapTupleHeaderGetRawXmax(tuple);
7841 0 : if (MultiXactIdIsValid(multi))
7842 0 : return true;
7843 : }
7844 : else
7845 : {
7846 269562 : xid = HeapTupleHeaderGetRawXmax(tuple);
7847 269562 : if (TransactionIdIsNormal(xid))
7848 14 : return true;
7849 : }
7850 :
7851 269548 : if (tuple->t_infomask & HEAP_MOVED)
7852 : {
7853 0 : xid = HeapTupleHeaderGetXvac(tuple);
7854 0 : if (TransactionIdIsNormal(xid))
7855 0 : return true;
7856 : }
7857 :
7858 269548 : return false;
7859 : }
7860 :
7861 : /*
7862 : * heap_tuple_should_freeze
7863 : *
7864 : * Return value indicates if heap_prepare_freeze_tuple sibling function would
7865 : * (or should) force freezing of the heap page that contains caller's tuple.
7866 : * Tuple header XIDs/MXIDs < FreezeLimit/MultiXactCutoff trigger freezing.
7867 : * This includes (xmin, xmax, xvac) fields, as well as MultiXact member XIDs.
7868 : *
7869 : * The *NoFreezePageRelfrozenXid and *NoFreezePageRelminMxid input/output
7870 : * arguments help VACUUM track the oldest extant XID/MXID remaining in rel.
7871 : * Our working assumption is that caller won't decide to freeze this tuple.
7872 : * It's up to caller to only ratchet back its own top-level trackers after the
7873 : * point that it fully commits to not freezing the tuple/page in question.
7874 : */
7875 : bool
7876 21415266 : heap_tuple_should_freeze(HeapTupleHeader tuple,
7877 : const struct VacuumCutoffs *cutoffs,
7878 : TransactionId *NoFreezePageRelfrozenXid,
7879 : MultiXactId *NoFreezePageRelminMxid)
7880 : {
7881 : TransactionId xid;
7882 : MultiXactId multi;
7883 21415266 : bool freeze = false;
7884 :
7885 : /* First deal with xmin */
7886 21415266 : xid = HeapTupleHeaderGetXmin(tuple);
7887 21415266 : if (TransactionIdIsNormal(xid))
7888 : {
7889 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7890 3555246 : if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7891 45430 : *NoFreezePageRelfrozenXid = xid;
7892 3555246 : if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7893 41888 : freeze = true;
7894 : }
7895 :
7896 : /* Now deal with xmax */
7897 21415266 : xid = InvalidTransactionId;
7898 21415266 : multi = InvalidMultiXactId;
7899 21415266 : if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7900 4 : multi = HeapTupleHeaderGetRawXmax(tuple);
7901 : else
7902 21415262 : xid = HeapTupleHeaderGetRawXmax(tuple);
7903 :
7904 21415266 : if (TransactionIdIsNormal(xid))
7905 : {
7906 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7907 : /* xmax is a non-permanent XID */
7908 18436298 : if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7909 4 : *NoFreezePageRelfrozenXid = xid;
7910 18436298 : if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7911 46 : freeze = true;
7912 : }
7913 2978968 : else if (!MultiXactIdIsValid(multi))
7914 : {
7915 : /* xmax is a permanent XID or invalid MultiXactId/XID */
7916 : }
7917 4 : else if (HEAP_LOCKED_UPGRADED(tuple->t_infomask))
7918 : {
7919 : /* xmax is a pg_upgrade'd MultiXact, which can't have updater XID */
7920 0 : if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
7921 0 : *NoFreezePageRelminMxid = multi;
7922 : /* heap_prepare_freeze_tuple always freezes pg_upgrade'd xmax */
7923 0 : freeze = true;
7924 : }
7925 : else
7926 : {
7927 : /* xmax is a MultiXactId that may have an updater XID */
7928 : MultiXactMember *members;
7929 : int nmembers;
7930 :
7931 : Assert(MultiXactIdPrecedesOrEquals(cutoffs->relminmxid, multi));
7932 4 : if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
7933 4 : *NoFreezePageRelminMxid = multi;
7934 4 : if (MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff))
7935 4 : freeze = true;
7936 :
7937 : /* need to check whether any member of the mxact is old */
7938 4 : nmembers = GetMultiXactIdMembers(multi, &members, false,
7939 4 : HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
7940 :
7941 10 : for (int i = 0; i < nmembers; i++)
7942 : {
7943 6 : xid = members[i].xid;
7944 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7945 6 : if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7946 0 : *NoFreezePageRelfrozenXid = xid;
7947 6 : if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7948 0 : freeze = true;
7949 : }
7950 4 : if (nmembers > 0)
7951 2 : pfree(members);
7952 : }
7953 :
7954 21415266 : if (tuple->t_infomask & HEAP_MOVED)
7955 : {
7956 0 : xid = HeapTupleHeaderGetXvac(tuple);
7957 0 : if (TransactionIdIsNormal(xid))
7958 : {
7959 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7960 0 : if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7961 0 : *NoFreezePageRelfrozenXid = xid;
7962 : /* heap_prepare_freeze_tuple forces xvac freezing */
7963 0 : freeze = true;
7964 : }
7965 : }
7966 :
7967 21415266 : return freeze;
7968 : }
7969 :
7970 : /*
7971 : * Maintain snapshotConflictHorizon for caller by ratcheting forward its value
7972 : * using any committed XIDs contained in 'tuple', an obsolescent heap tuple
7973 : * that caller is in the process of physically removing, e.g. via HOT pruning
7974 : * or index deletion.
7975 : *
7976 : * Caller must initialize its value to InvalidTransactionId, which is
7977 : * generally interpreted as "definitely no need for a recovery conflict".
7978 : * Final value must reflect all heap tuples that caller will physically remove
7979 : * (or remove TID references to) via its ongoing pruning/deletion operation.
7980 : * ResolveRecoveryConflictWithSnapshot() is passed the final value (taken from
7981 : * caller's WAL record) by REDO routine when it replays caller's operation.
7982 : */
7983 : void
7984 3013412 : HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple,
7985 : TransactionId *snapshotConflictHorizon)
7986 : {
7987 3013412 : TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
7988 3013412 : TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
7989 3013412 : TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
7990 :
7991 3013412 : if (tuple->t_infomask & HEAP_MOVED)
7992 : {
7993 0 : if (TransactionIdPrecedes(*snapshotConflictHorizon, xvac))
7994 0 : *snapshotConflictHorizon = xvac;
7995 : }
7996 :
7997 : /*
7998 : * Ignore tuples inserted by an aborted transaction or if the tuple was
7999 : * updated/deleted by the inserting transaction.
8000 : *
8001 : * Look for a committed hint bit, or if no xmin bit is set, check clog.
8002 : */
8003 3013412 : if (HeapTupleHeaderXminCommitted(tuple) ||
8004 210914 : (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
8005 : {
8006 5448540 : if (xmax != xmin &&
8007 2589484 : TransactionIdFollows(xmax, *snapshotConflictHorizon))
8008 193242 : *snapshotConflictHorizon = xmax;
8009 : }
8010 3013412 : }
8011 :
8012 : #ifdef USE_PREFETCH
8013 : /*
8014 : * Helper function for heap_index_delete_tuples. Issues prefetch requests for
8015 : * prefetch_count buffers. The prefetch_state keeps track of all the buffers
8016 : * we can prefetch, and which have already been prefetched; each call to this
8017 : * function picks up where the previous call left off.
8018 : *
8019 : * Note: we expect the deltids array to be sorted in an order that groups TIDs
8020 : * by heap block, with all TIDs for each block appearing together in exactly
8021 : * one group.
8022 : */
8023 : static void
8024 38284 : index_delete_prefetch_buffer(Relation rel,
8025 : IndexDeletePrefetchState *prefetch_state,
8026 : int prefetch_count)
8027 : {
8028 38284 : BlockNumber cur_hblkno = prefetch_state->cur_hblkno;
8029 38284 : int count = 0;
8030 : int i;
8031 38284 : int ndeltids = prefetch_state->ndeltids;
8032 38284 : TM_IndexDelete *deltids = prefetch_state->deltids;
8033 :
8034 38284 : for (i = prefetch_state->next_item;
8035 1339968 : i < ndeltids && count < prefetch_count;
8036 1301684 : i++)
8037 : {
8038 1301684 : ItemPointer htid = &deltids[i].tid;
8039 :
8040 2591940 : if (cur_hblkno == InvalidBlockNumber ||
8041 1290256 : ItemPointerGetBlockNumber(htid) != cur_hblkno)
8042 : {
8043 35028 : cur_hblkno = ItemPointerGetBlockNumber(htid);
8044 35028 : PrefetchBuffer(rel, MAIN_FORKNUM, cur_hblkno);
8045 35028 : count++;
8046 : }
8047 : }
8048 :
8049 : /*
8050 : * Save the prefetch position so that next time we can continue from that
8051 : * position.
8052 : */
8053 38284 : prefetch_state->next_item = i;
8054 38284 : prefetch_state->cur_hblkno = cur_hblkno;
8055 38284 : }
8056 : #endif
8057 :
8058 : /*
8059 : * Helper function for heap_index_delete_tuples. Checks for index corruption
8060 : * involving an invalid TID in index AM caller's index page.
8061 : *
8062 : * This is an ideal place for these checks. The index AM must hold a buffer
8063 : * lock on the index page containing the TIDs we examine here, so we don't
8064 : * have to worry about concurrent VACUUMs at all. We can be sure that the
8065 : * index is corrupt when htid points directly to an LP_UNUSED item or
8066 : * heap-only tuple, which is not the case during standard index scans.
8067 : */
8068 : static inline void
8069 1076428 : index_delete_check_htid(TM_IndexDeleteOp *delstate,
8070 : Page page, OffsetNumber maxoff,
8071 : ItemPointer htid, TM_IndexStatus *istatus)
8072 : {
8073 1076428 : OffsetNumber indexpagehoffnum = ItemPointerGetOffsetNumber(htid);
8074 : ItemId iid;
8075 :
8076 : Assert(OffsetNumberIsValid(istatus->idxoffnum));
8077 :
8078 1076428 : if (unlikely(indexpagehoffnum > maxoff))
8079 0 : ereport(ERROR,
8080 : (errcode(ERRCODE_INDEX_CORRUPTED),
8081 : errmsg_internal("heap tid from index tuple (%u,%u) points past end of heap page line pointer array at offset %u of block %u in index \"%s\"",
8082 : ItemPointerGetBlockNumber(htid),
8083 : indexpagehoffnum,
8084 : istatus->idxoffnum, delstate->iblknum,
8085 : RelationGetRelationName(delstate->irel))));
8086 :
8087 1076428 : iid = PageGetItemId(page, indexpagehoffnum);
8088 1076428 : if (unlikely(!ItemIdIsUsed(iid)))
8089 0 : ereport(ERROR,
8090 : (errcode(ERRCODE_INDEX_CORRUPTED),
8091 : errmsg_internal("heap tid from index tuple (%u,%u) points to unused heap page item at offset %u of block %u in index \"%s\"",
8092 : ItemPointerGetBlockNumber(htid),
8093 : indexpagehoffnum,
8094 : istatus->idxoffnum, delstate->iblknum,
8095 : RelationGetRelationName(delstate->irel))));
8096 :
8097 1076428 : if (ItemIdHasStorage(iid))
8098 : {
8099 : HeapTupleHeader htup;
8100 :
8101 : Assert(ItemIdIsNormal(iid));
8102 641472 : htup = (HeapTupleHeader) PageGetItem(page, iid);
8103 :
8104 641472 : if (unlikely(HeapTupleHeaderIsHeapOnly(htup)))
8105 0 : ereport(ERROR,
8106 : (errcode(ERRCODE_INDEX_CORRUPTED),
8107 : errmsg_internal("heap tid from index tuple (%u,%u) points to heap-only tuple at offset %u of block %u in index \"%s\"",
8108 : ItemPointerGetBlockNumber(htid),
8109 : indexpagehoffnum,
8110 : istatus->idxoffnum, delstate->iblknum,
8111 : RelationGetRelationName(delstate->irel))));
8112 : }
8113 1076428 : }
8114 :
8115 : /*
8116 : * heapam implementation of tableam's index_delete_tuples interface.
8117 : *
8118 : * This helper function is called by index AMs during index tuple deletion.
8119 : * See tableam header comments for an explanation of the interface implemented
8120 : * here and a general theory of operation. Note that each call here is either
8121 : * a simple index deletion call, or a bottom-up index deletion call.
8122 : *
8123 : * It's possible for this to generate a fair amount of I/O, since we may be
8124 : * deleting hundreds of tuples from a single index block. To amortize that
8125 : * cost to some degree, this uses prefetching and combines repeat accesses to
8126 : * the same heap block.
8127 : */
8128 : TransactionId
8129 11428 : heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
8130 : {
8131 : /* Initial assumption is that earlier pruning took care of conflict */
8132 11428 : TransactionId snapshotConflictHorizon = InvalidTransactionId;
8133 11428 : BlockNumber blkno = InvalidBlockNumber;
8134 11428 : Buffer buf = InvalidBuffer;
8135 11428 : Page page = NULL;
8136 11428 : OffsetNumber maxoff = InvalidOffsetNumber;
8137 : TransactionId priorXmax;
8138 : #ifdef USE_PREFETCH
8139 : IndexDeletePrefetchState prefetch_state;
8140 : int prefetch_distance;
8141 : #endif
8142 : SnapshotData SnapshotNonVacuumable;
8143 11428 : int finalndeltids = 0,
8144 11428 : nblocksaccessed = 0;
8145 :
8146 : /* State that's only used in bottom-up index deletion case */
8147 11428 : int nblocksfavorable = 0;
8148 11428 : int curtargetfreespace = delstate->bottomupfreespace,
8149 11428 : lastfreespace = 0,
8150 11428 : actualfreespace = 0;
8151 11428 : bool bottomup_final_block = false;
8152 :
8153 11428 : InitNonVacuumableSnapshot(SnapshotNonVacuumable, GlobalVisTestFor(rel));
8154 :
8155 : /* Sort caller's deltids array by TID for further processing */
8156 11428 : index_delete_sort(delstate);
8157 :
8158 : /*
8159 : * Bottom-up case: resort deltids array in an order attuned to where the
8160 : * greatest number of promising TIDs are to be found, and determine how
8161 : * many blocks from the start of sorted array should be considered
8162 : * favorable. This will also shrink the deltids array in order to
8163 : * eliminate completely unfavorable blocks up front.
8164 : */
8165 11428 : if (delstate->bottomup)
8166 3976 : nblocksfavorable = bottomup_sort_and_shrink(delstate);
8167 :
8168 : #ifdef USE_PREFETCH
8169 : /* Initialize prefetch state. */
8170 11428 : prefetch_state.cur_hblkno = InvalidBlockNumber;
8171 11428 : prefetch_state.next_item = 0;
8172 11428 : prefetch_state.ndeltids = delstate->ndeltids;
8173 11428 : prefetch_state.deltids = delstate->deltids;
8174 :
8175 : /*
8176 : * Determine the prefetch distance that we will attempt to maintain.
8177 : *
8178 : * Since the caller holds a buffer lock somewhere in rel, we'd better make
8179 : * sure that isn't a catalog relation before we call code that does
8180 : * syscache lookups, to avoid risk of deadlock.
8181 : */
8182 11428 : if (IsCatalogRelation(rel))
8183 8234 : prefetch_distance = maintenance_io_concurrency;
8184 : else
8185 : prefetch_distance =
8186 3194 : get_tablespace_maintenance_io_concurrency(rel->rd_rel->reltablespace);
8187 :
8188 : /* Cap initial prefetch distance for bottom-up deletion caller */
8189 11428 : if (delstate->bottomup)
8190 : {
8191 : Assert(nblocksfavorable >= 1);
8192 : Assert(nblocksfavorable <= BOTTOMUP_MAX_NBLOCKS);
8193 3976 : prefetch_distance = Min(prefetch_distance, nblocksfavorable);
8194 : }
8195 :
8196 : /* Start prefetching. */
8197 11428 : index_delete_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
8198 : #endif
8199 :
8200 : /* Iterate over deltids, determine which to delete, check their horizon */
8201 : Assert(delstate->ndeltids > 0);
8202 1087856 : for (int i = 0; i < delstate->ndeltids; i++)
8203 : {
8204 1080404 : TM_IndexDelete *ideltid = &delstate->deltids[i];
8205 1080404 : TM_IndexStatus *istatus = delstate->status + ideltid->id;
8206 1080404 : ItemPointer htid = &ideltid->tid;
8207 : OffsetNumber offnum;
8208 :
8209 : /*
8210 : * Read buffer, and perform required extra steps each time a new block
8211 : * is encountered. Avoid refetching if it's the same block as the one
8212 : * from the last htid.
8213 : */
8214 2149380 : if (blkno == InvalidBlockNumber ||
8215 1068976 : ItemPointerGetBlockNumber(htid) != blkno)
8216 : {
8217 : /*
8218 : * Consider giving up early for bottom-up index deletion caller
8219 : * first. (Only prefetch next-next block afterwards, when it
8220 : * becomes clear that we're at least going to access the next
8221 : * block in line.)
8222 : *
8223 : * Sometimes the first block frees so much space for bottom-up
8224 : * caller that the deletion process can end without accessing any
8225 : * more blocks. It is usually necessary to access 2 or 3 blocks
8226 : * per bottom-up deletion operation, though.
8227 : */
8228 30832 : if (delstate->bottomup)
8229 : {
8230 : /*
8231 : * We often allow caller to delete a few additional items
8232 : * whose entries we reached after the point that space target
8233 : * from caller was satisfied. The cost of accessing the page
8234 : * was already paid at that point, so it made sense to finish
8235 : * it off. When that happened, we finalize everything here
8236 : * (by finishing off the whole bottom-up deletion operation
8237 : * without needlessly paying the cost of accessing any more
8238 : * blocks).
8239 : */
8240 8830 : if (bottomup_final_block)
8241 302 : break;
8242 :
8243 : /*
8244 : * Give up when we didn't enable our caller to free any
8245 : * additional space as a result of processing the page that we
8246 : * just finished up with. This rule is the main way in which
8247 : * we keep the cost of bottom-up deletion under control.
8248 : */
8249 8528 : if (nblocksaccessed >= 1 && actualfreespace == lastfreespace)
8250 3674 : break;
8251 4854 : lastfreespace = actualfreespace; /* for next time */
8252 :
8253 : /*
8254 : * Deletion operation (which is bottom-up) will definitely
8255 : * access the next block in line. Prepare for that now.
8256 : *
8257 : * Decay target free space so that we don't hang on for too
8258 : * long with a marginal case. (Space target is only truly
8259 : * helpful when it allows us to recognize that we don't need
8260 : * to access more than 1 or 2 blocks to satisfy caller due to
8261 : * agreeable workload characteristics.)
8262 : *
8263 : * We are a bit more patient when we encounter contiguous
8264 : * blocks, though: these are treated as favorable blocks. The
8265 : * decay process is only applied when the next block in line
8266 : * is not a favorable/contiguous block. This is not an
8267 : * exception to the general rule; we still insist on finding
8268 : * at least one deletable item per block accessed. See
8269 : * bottomup_nblocksfavorable() for full details of the theory
8270 : * behind favorable blocks and heap block locality in general.
8271 : *
8272 : * Note: The first block in line is always treated as a
8273 : * favorable block, so the earliest possible point that the
8274 : * decay can be applied is just before we access the second
8275 : * block in line. The Assert() verifies this for us.
8276 : */
8277 : Assert(nblocksaccessed > 0 || nblocksfavorable > 0);
8278 4854 : if (nblocksfavorable > 0)
8279 4310 : nblocksfavorable--;
8280 : else
8281 544 : curtargetfreespace /= 2;
8282 : }
8283 :
8284 : /* release old buffer */
8285 26856 : if (BufferIsValid(buf))
8286 15428 : UnlockReleaseBuffer(buf);
8287 :
8288 26856 : blkno = ItemPointerGetBlockNumber(htid);
8289 26856 : buf = ReadBuffer(rel, blkno);
8290 26856 : nblocksaccessed++;
8291 : Assert(!delstate->bottomup ||
8292 : nblocksaccessed <= BOTTOMUP_MAX_NBLOCKS);
8293 :
8294 : #ifdef USE_PREFETCH
8295 :
8296 : /*
8297 : * To maintain the prefetch distance, prefetch one more page for
8298 : * each page we read.
8299 : */
8300 26856 : index_delete_prefetch_buffer(rel, &prefetch_state, 1);
8301 : #endif
8302 :
8303 26856 : LockBuffer(buf, BUFFER_LOCK_SHARE);
8304 :
8305 26856 : page = BufferGetPage(buf);
8306 26856 : maxoff = PageGetMaxOffsetNumber(page);
8307 : }
8308 :
8309 : /*
8310 : * In passing, detect index corruption involving an index page with a
8311 : * TID that points to a location in the heap that couldn't possibly be
8312 : * correct. We only do this with actual TIDs from caller's index page
8313 : * (not items reached by traversing through a HOT chain).
8314 : */
8315 1076428 : index_delete_check_htid(delstate, page, maxoff, htid, istatus);
8316 :
8317 1076428 : if (istatus->knowndeletable)
8318 : Assert(!delstate->bottomup && !istatus->promising);
8319 : else
8320 : {
8321 811236 : ItemPointerData tmp = *htid;
8322 : HeapTupleData heapTuple;
8323 :
8324 : /* Are any tuples from this HOT chain non-vacuumable? */
8325 811236 : if (heap_hot_search_buffer(&tmp, rel, buf, &SnapshotNonVacuumable,
8326 : &heapTuple, NULL, true))
8327 490834 : continue; /* can't delete entry */
8328 :
8329 : /* Caller will delete, since whole HOT chain is vacuumable */
8330 320402 : istatus->knowndeletable = true;
8331 :
8332 : /* Maintain index free space info for bottom-up deletion case */
8333 320402 : if (delstate->bottomup)
8334 : {
8335 : Assert(istatus->freespace > 0);
8336 17304 : actualfreespace += istatus->freespace;
8337 17304 : if (actualfreespace >= curtargetfreespace)
8338 4330 : bottomup_final_block = true;
8339 : }
8340 : }
8341 :
8342 : /*
8343 : * Maintain snapshotConflictHorizon value for deletion operation as a
8344 : * whole by advancing current value using heap tuple headers. This is
8345 : * loosely based on the logic for pruning a HOT chain.
8346 : */
8347 585594 : offnum = ItemPointerGetOffsetNumber(htid);
8348 585594 : priorXmax = InvalidTransactionId; /* cannot check first XMIN */
8349 : for (;;)
8350 41390 : {
8351 : ItemId lp;
8352 : HeapTupleHeader htup;
8353 :
8354 : /* Sanity check (pure paranoia) */
8355 626984 : if (offnum < FirstOffsetNumber)
8356 0 : break;
8357 :
8358 : /*
8359 : * An offset past the end of page's line pointer array is possible
8360 : * when the array was truncated
8361 : */
8362 626984 : if (offnum > maxoff)
8363 0 : break;
8364 :
8365 626984 : lp = PageGetItemId(page, offnum);
8366 626984 : if (ItemIdIsRedirected(lp))
8367 : {
8368 18578 : offnum = ItemIdGetRedirect(lp);
8369 18578 : continue;
8370 : }
8371 :
8372 : /*
8373 : * We'll often encounter LP_DEAD line pointers (especially with an
8374 : * entry marked knowndeletable by our caller up front). No heap
8375 : * tuple headers get examined for an htid that leads us to an
8376 : * LP_DEAD item. This is okay because the earlier pruning
8377 : * operation that made the line pointer LP_DEAD in the first place
8378 : * must have considered the original tuple header as part of
8379 : * generating its own snapshotConflictHorizon value.
8380 : *
8381 : * Relying on XLOG_HEAP2_PRUNE_VACUUM_SCAN records like this is
8382 : * the same strategy that index vacuuming uses in all cases. Index
8383 : * VACUUM WAL records don't even have a snapshotConflictHorizon
8384 : * field of their own for this reason.
8385 : */
8386 608406 : if (!ItemIdIsNormal(lp))
8387 385260 : break;
8388 :
8389 223146 : htup = (HeapTupleHeader) PageGetItem(page, lp);
8390 :
8391 : /*
8392 : * Check the tuple XMIN against prior XMAX, if any
8393 : */
8394 245958 : if (TransactionIdIsValid(priorXmax) &&
8395 22812 : !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
8396 0 : break;
8397 :
8398 223146 : HeapTupleHeaderAdvanceConflictHorizon(htup,
8399 : &snapshotConflictHorizon);
8400 :
8401 : /*
8402 : * If the tuple is not HOT-updated, then we are at the end of this
8403 : * HOT-chain. No need to visit later tuples from the same update
8404 : * chain (they get their own index entries) -- just move on to
8405 : * next htid from index AM caller.
8406 : */
8407 223146 : if (!HeapTupleHeaderIsHotUpdated(htup))
8408 200334 : break;
8409 :
8410 : /* Advance to next HOT chain member */
8411 : Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blkno);
8412 22812 : offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
8413 22812 : priorXmax = HeapTupleHeaderGetUpdateXid(htup);
8414 : }
8415 :
8416 : /* Enable further/final shrinking of deltids for caller */
8417 585594 : finalndeltids = i + 1;
8418 : }
8419 :
8420 11428 : UnlockReleaseBuffer(buf);
8421 :
8422 : /*
8423 : * Shrink deltids array to exclude non-deletable entries at the end. This
8424 : * is not just a minor optimization. Final deltids array size might be
8425 : * zero for a bottom-up caller. Index AM is explicitly allowed to rely on
8426 : * ndeltids being zero in all cases with zero total deletable entries.
8427 : */
8428 : Assert(finalndeltids > 0 || delstate->bottomup);
8429 11428 : delstate->ndeltids = finalndeltids;
8430 :
8431 11428 : return snapshotConflictHorizon;
8432 : }
8433 :
8434 : /*
8435 : * Specialized inlineable comparison function for index_delete_sort()
8436 : */
8437 : static inline int
8438 25665260 : index_delete_sort_cmp(TM_IndexDelete *deltid1, TM_IndexDelete *deltid2)
8439 : {
8440 25665260 : ItemPointer tid1 = &deltid1->tid;
8441 25665260 : ItemPointer tid2 = &deltid2->tid;
8442 :
8443 : {
8444 25665260 : BlockNumber blk1 = ItemPointerGetBlockNumber(tid1);
8445 25665260 : BlockNumber blk2 = ItemPointerGetBlockNumber(tid2);
8446 :
8447 25665260 : if (blk1 != blk2)
8448 10526068 : return (blk1 < blk2) ? -1 : 1;
8449 : }
8450 : {
8451 15139192 : OffsetNumber pos1 = ItemPointerGetOffsetNumber(tid1);
8452 15139192 : OffsetNumber pos2 = ItemPointerGetOffsetNumber(tid2);
8453 :
8454 15139192 : if (pos1 != pos2)
8455 15139192 : return (pos1 < pos2) ? -1 : 1;
8456 : }
8457 :
8458 : Assert(false);
8459 :
8460 0 : return 0;
8461 : }
8462 :
8463 : /*
8464 : * Sort deltids array from delstate by TID. This prepares it for further
8465 : * processing by heap_index_delete_tuples().
8466 : *
8467 : * This operation becomes a noticeable consumer of CPU cycles with some
8468 : * workloads, so we go to the trouble of specialization/micro optimization.
8469 : * We use shellsort for this because it's easy to specialize, compiles to
8470 : * relatively few instructions, and is adaptive to presorted inputs/subsets
8471 : * (which are typical here).
8472 : */
8473 : static void
8474 11428 : index_delete_sort(TM_IndexDeleteOp *delstate)
8475 : {
8476 11428 : TM_IndexDelete *deltids = delstate->deltids;
8477 11428 : int ndeltids = delstate->ndeltids;
8478 :
8479 : /*
8480 : * Shellsort gap sequence (taken from Sedgewick-Incerpi paper).
8481 : *
8482 : * This implementation is fast with array sizes up to ~4500. This covers
8483 : * all supported BLCKSZ values.
8484 : */
8485 11428 : const int gaps[9] = {1968, 861, 336, 112, 48, 21, 7, 3, 1};
8486 :
8487 : /* Think carefully before changing anything here -- keep swaps cheap */
8488 : StaticAssertDecl(sizeof(TM_IndexDelete) <= 8,
8489 : "element size exceeds 8 bytes");
8490 :
8491 114280 : for (int g = 0; g < lengthof(gaps); g++)
8492 : {
8493 15268066 : for (int hi = gaps[g], i = hi; i < ndeltids; i++)
8494 : {
8495 15165214 : TM_IndexDelete d = deltids[i];
8496 15165214 : int j = i;
8497 :
8498 26414814 : while (j >= hi && index_delete_sort_cmp(&deltids[j - hi], &d) >= 0)
8499 : {
8500 11249600 : deltids[j] = deltids[j - hi];
8501 11249600 : j -= hi;
8502 : }
8503 15165214 : deltids[j] = d;
8504 : }
8505 : }
8506 11428 : }
8507 :
8508 : /*
8509 : * Returns how many blocks should be considered favorable/contiguous for a
8510 : * bottom-up index deletion pass. This is a number of heap blocks that starts
8511 : * from and includes the first block in line.
8512 : *
8513 : * There is always at least one favorable block during bottom-up index
8514 : * deletion. In the worst case (i.e. with totally random heap blocks) the
8515 : * first block in line (the only favorable block) can be thought of as a
8516 : * degenerate array of contiguous blocks that consists of a single block.
8517 : * heap_index_delete_tuples() will expect this.
8518 : *
8519 : * Caller passes blockgroups, a description of the final order that deltids
8520 : * will be sorted in for heap_index_delete_tuples() bottom-up index deletion
8521 : * processing. Note that deltids need not actually be sorted just yet (caller
8522 : * only passes deltids to us so that we can interpret blockgroups).
8523 : *
8524 : * You might guess that the existence of contiguous blocks cannot matter much,
8525 : * since in general the main factor that determines which blocks we visit is
8526 : * the number of promising TIDs, which is a fixed hint from the index AM.
8527 : * We're not really targeting the general case, though -- the actual goal is
8528 : * to adapt our behavior to a wide variety of naturally occurring conditions.
8529 : * The effects of most of the heuristics we apply are only noticeable in the
8530 : * aggregate, over time and across many _related_ bottom-up index deletion
8531 : * passes.
8532 : *
8533 : * Deeming certain blocks favorable allows heapam to recognize and adapt to
8534 : * workloads where heap blocks visited during bottom-up index deletion can be
8535 : * accessed contiguously, in the sense that each newly visited block is the
8536 : * neighbor of the block that bottom-up deletion just finished processing (or
8537 : * close enough to it). It will likely be cheaper to access more favorable
8538 : * blocks sooner rather than later (e.g. in this pass, not across a series of
8539 : * related bottom-up passes). Either way it is probably only a matter of time
8540 : * (or a matter of further correlated version churn) before all blocks that
8541 : * appear together as a single large batch of favorable blocks get accessed by
8542 : * _some_ bottom-up pass. Large batches of favorable blocks tend to either
8543 : * appear almost constantly or not even once (it all depends on per-index
8544 : * workload characteristics).
8545 : *
8546 : * Note that the blockgroups sort order applies a power-of-two bucketing
8547 : * scheme that creates opportunities for contiguous groups of blocks to get
8548 : * batched together, at least with workloads that are naturally amenable to
8549 : * being driven by heap block locality. This doesn't just enhance the spatial
8550 : * locality of bottom-up heap block processing in the obvious way. It also
8551 : * enables temporal locality of access, since sorting by heap block number
8552 : * naturally tends to make the bottom-up processing order deterministic.
8553 : *
8554 : * Consider the following example to get a sense of how temporal locality
8555 : * might matter: There is a heap relation with several indexes, each of which
8556 : * is low to medium cardinality. It is subject to constant non-HOT updates.
8557 : * The updates are skewed (in one part of the primary key, perhaps). None of
8558 : * the indexes are logically modified by the UPDATE statements (if they were
8559 : * then bottom-up index deletion would not be triggered in the first place).
8560 : * Naturally, each new round of index tuples (for each heap tuple that gets a
8561 : * heap_update() call) will have the same heap TID in each and every index.
8562 : * Since these indexes are low cardinality and never get logically modified,
8563 : * heapam processing during bottom-up deletion passes will access heap blocks
8564 : * in approximately sequential order. Temporal locality of access occurs due
8565 : * to bottom-up deletion passes behaving very similarly across each of the
8566 : * indexes at any given moment. This keeps the number of buffer misses needed
8567 : * to visit heap blocks to a minimum.
8568 : */
8569 : static int
8570 3976 : bottomup_nblocksfavorable(IndexDeleteCounts *blockgroups, int nblockgroups,
8571 : TM_IndexDelete *deltids)
8572 : {
8573 3976 : int64 lastblock = -1;
8574 3976 : int nblocksfavorable = 0;
8575 :
8576 : Assert(nblockgroups >= 1);
8577 : Assert(nblockgroups <= BOTTOMUP_MAX_NBLOCKS);
8578 :
8579 : /*
8580 : * We tolerate heap blocks that will be accessed only slightly out of
8581 : * physical order. Small blips occur when a pair of almost-contiguous
8582 : * blocks happen to fall into different buckets (perhaps due only to a
8583 : * small difference in npromisingtids that the bucketing scheme didn't
8584 : * quite manage to ignore). We effectively ignore these blips by applying
8585 : * a small tolerance. The precise tolerance we use is a little arbitrary,
8586 : * but it works well enough in practice.
8587 : */
8588 12730 : for (int b = 0; b < nblockgroups; b++)
8589 : {
8590 12182 : IndexDeleteCounts *group = blockgroups + b;
8591 12182 : TM_IndexDelete *firstdtid = deltids + group->ifirsttid;
8592 12182 : BlockNumber block = ItemPointerGetBlockNumber(&firstdtid->tid);
8593 :
8594 12182 : if (lastblock != -1 &&
8595 8206 : ((int64) block < lastblock - BOTTOMUP_TOLERANCE_NBLOCKS ||
8596 7140 : (int64) block > lastblock + BOTTOMUP_TOLERANCE_NBLOCKS))
8597 : break;
8598 :
8599 8754 : nblocksfavorable++;
8600 8754 : lastblock = block;
8601 : }
8602 :
8603 : /* Always indicate that there is at least 1 favorable block */
8604 : Assert(nblocksfavorable >= 1);
8605 :
8606 3976 : return nblocksfavorable;
8607 : }
8608 :
8609 : /*
8610 : * qsort comparison function for bottomup_sort_and_shrink()
8611 : */
8612 : static int
8613 411006 : bottomup_sort_and_shrink_cmp(const void *arg1, const void *arg2)
8614 : {
8615 411006 : const IndexDeleteCounts *group1 = (const IndexDeleteCounts *) arg1;
8616 411006 : const IndexDeleteCounts *group2 = (const IndexDeleteCounts *) arg2;
8617 :
8618 : /*
8619 : * Most significant field is npromisingtids (which we invert the order of
8620 : * so as to sort in desc order).
8621 : *
8622 : * Caller should have already normalized npromisingtids fields into
8623 : * power-of-two values (buckets).
8624 : */
8625 411006 : if (group1->npromisingtids > group2->npromisingtids)
8626 19034 : return -1;
8627 391972 : if (group1->npromisingtids < group2->npromisingtids)
8628 21898 : return 1;
8629 :
8630 : /*
8631 : * Tiebreak: desc ntids sort order.
8632 : *
8633 : * We cannot expect power-of-two values for ntids fields. We should
8634 : * behave as if they were already rounded up for us instead.
8635 : */
8636 370074 : if (group1->ntids != group2->ntids)
8637 : {
8638 258210 : uint32 ntids1 = pg_nextpower2_32((uint32) group1->ntids);
8639 258210 : uint32 ntids2 = pg_nextpower2_32((uint32) group2->ntids);
8640 :
8641 258210 : if (ntids1 > ntids2)
8642 40950 : return -1;
8643 217260 : if (ntids1 < ntids2)
8644 52356 : return 1;
8645 : }
8646 :
8647 : /*
8648 : * Tiebreak: asc offset-into-deltids-for-block (offset to first TID for
8649 : * block in deltids array) order.
8650 : *
8651 : * This is equivalent to sorting in ascending heap block number order
8652 : * (among otherwise equal subsets of the array). This approach allows us
8653 : * to avoid accessing the out-of-line TID. (We rely on the assumption
8654 : * that the deltids array was sorted in ascending heap TID order when
8655 : * these offsets to the first TID from each heap block group were formed.)
8656 : */
8657 276768 : if (group1->ifirsttid > group2->ifirsttid)
8658 136496 : return 1;
8659 140272 : if (group1->ifirsttid < group2->ifirsttid)
8660 140272 : return -1;
8661 :
8662 0 : pg_unreachable();
8663 :
8664 : return 0;
8665 : }
8666 :
8667 : /*
8668 : * heap_index_delete_tuples() helper function for bottom-up deletion callers.
8669 : *
8670 : * Sorts deltids array in the order needed for useful processing by bottom-up
8671 : * deletion. The array should already be sorted in TID order when we're
8672 : * called. The sort process groups heap TIDs from deltids into heap block
8673 : * groupings. Earlier/more-promising groups/blocks are usually those that are
8674 : * known to have the most "promising" TIDs.
8675 : *
8676 : * Sets new size of deltids array (ndeltids) in state. deltids will only have
8677 : * TIDs from the BOTTOMUP_MAX_NBLOCKS most promising heap blocks when we
8678 : * return. This often means that deltids will be shrunk to a small fraction
8679 : * of its original size (we eliminate many heap blocks from consideration for
8680 : * caller up front).
8681 : *
8682 : * Returns the number of "favorable" blocks. See bottomup_nblocksfavorable()
8683 : * for a definition and full details.
8684 : */
8685 : static int
8686 3976 : bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
8687 : {
8688 : IndexDeleteCounts *blockgroups;
8689 : TM_IndexDelete *reordereddeltids;
8690 3976 : BlockNumber curblock = InvalidBlockNumber;
8691 3976 : int nblockgroups = 0;
8692 3976 : int ncopied = 0;
8693 3976 : int nblocksfavorable = 0;
8694 :
8695 : Assert(delstate->bottomup);
8696 : Assert(delstate->ndeltids > 0);
8697 :
8698 : /* Calculate per-heap-block count of TIDs */
8699 3976 : blockgroups = palloc(sizeof(IndexDeleteCounts) * delstate->ndeltids);
8700 1904158 : for (int i = 0; i < delstate->ndeltids; i++)
8701 : {
8702 1900182 : TM_IndexDelete *ideltid = &delstate->deltids[i];
8703 1900182 : TM_IndexStatus *istatus = delstate->status + ideltid->id;
8704 1900182 : ItemPointer htid = &ideltid->tid;
8705 1900182 : bool promising = istatus->promising;
8706 :
8707 1900182 : if (curblock != ItemPointerGetBlockNumber(htid))
8708 : {
8709 : /* New block group */
8710 79706 : nblockgroups++;
8711 :
8712 : Assert(curblock < ItemPointerGetBlockNumber(htid) ||
8713 : !BlockNumberIsValid(curblock));
8714 :
8715 79706 : curblock = ItemPointerGetBlockNumber(htid);
8716 79706 : blockgroups[nblockgroups - 1].ifirsttid = i;
8717 79706 : blockgroups[nblockgroups - 1].ntids = 1;
8718 79706 : blockgroups[nblockgroups - 1].npromisingtids = 0;
8719 : }
8720 : else
8721 : {
8722 1820476 : blockgroups[nblockgroups - 1].ntids++;
8723 : }
8724 :
8725 1900182 : if (promising)
8726 243664 : blockgroups[nblockgroups - 1].npromisingtids++;
8727 : }
8728 :
8729 : /*
8730 : * We're about ready to sort block groups to determine the optimal order
8731 : * for visiting heap blocks. But before we do, round the number of
8732 : * promising tuples for each block group up to the next power-of-two,
8733 : * unless it is very low (less than 4), in which case we round up to 4.
8734 : * npromisingtids is far too noisy to trust when choosing between a pair
8735 : * of block groups that both have very low values.
8736 : *
8737 : * This scheme divides heap blocks/block groups into buckets. Each bucket
8738 : * contains blocks that have _approximately_ the same number of promising
8739 : * TIDs as each other. The goal is to ignore relatively small differences
8740 : * in the total number of promising entries, so that the whole process can
8741 : * give a little weight to heapam factors (like heap block locality)
8742 : * instead. This isn't a trade-off, really -- we have nothing to lose. It
8743 : * would be foolish to interpret small differences in npromisingtids
8744 : * values as anything more than noise.
8745 : *
8746 : * We tiebreak on nhtids when sorting block group subsets that have the
8747 : * same npromisingtids, but this has the same issues as npromisingtids,
8748 : * and so nhtids is subject to the same power-of-two bucketing scheme. The
8749 : * only reason that we don't fix nhtids in the same way here too is that
8750 : * we'll need accurate nhtids values after the sort. We handle nhtids
8751 : * bucketization dynamically instead (in the sort comparator).
8752 : *
8753 : * See bottomup_nblocksfavorable() for a full explanation of when and how
8754 : * heap locality/favorable blocks can significantly influence when and how
8755 : * heap blocks are accessed.
8756 : */
8757 83682 : for (int b = 0; b < nblockgroups; b++)
8758 : {
8759 79706 : IndexDeleteCounts *group = blockgroups + b;
8760 :
8761 : /* Better off falling back on nhtids with low npromisingtids */
8762 79706 : if (group->npromisingtids <= 4)
8763 68530 : group->npromisingtids = 4;
8764 : else
8765 11176 : group->npromisingtids =
8766 11176 : pg_nextpower2_32((uint32) group->npromisingtids);
8767 : }
8768 :
8769 : /* Sort groups and rearrange caller's deltids array */
8770 3976 : qsort(blockgroups, nblockgroups, sizeof(IndexDeleteCounts),
8771 : bottomup_sort_and_shrink_cmp);
8772 3976 : reordereddeltids = palloc(delstate->ndeltids * sizeof(TM_IndexDelete));
8773 :
8774 3976 : nblockgroups = Min(BOTTOMUP_MAX_NBLOCKS, nblockgroups);
8775 : /* Determine number of favorable blocks at the start of final deltids */
8776 3976 : nblocksfavorable = bottomup_nblocksfavorable(blockgroups, nblockgroups,
8777 : delstate->deltids);
8778 :
8779 26612 : for (int b = 0; b < nblockgroups; b++)
8780 : {
8781 22636 : IndexDeleteCounts *group = blockgroups + b;
8782 22636 : TM_IndexDelete *firstdtid = delstate->deltids + group->ifirsttid;
8783 :
8784 22636 : memcpy(reordereddeltids + ncopied, firstdtid,
8785 22636 : sizeof(TM_IndexDelete) * group->ntids);
8786 22636 : ncopied += group->ntids;
8787 : }
8788 :
8789 : /* Copy final grouped and sorted TIDs back into start of caller's array */
8790 3976 : memcpy(delstate->deltids, reordereddeltids,
8791 : sizeof(TM_IndexDelete) * ncopied);
8792 3976 : delstate->ndeltids = ncopied;
8793 :
8794 3976 : pfree(reordereddeltids);
8795 3976 : pfree(blockgroups);
8796 :
8797 3976 : return nblocksfavorable;
8798 : }
8799 :
8800 : /*
8801 : * Perform XLogInsert for a heap-visible operation. 'block' is the block
8802 : * being marked all-visible, and vm_buffer is the buffer containing the
8803 : * corresponding visibility map block. Both should have already been modified
8804 : * and dirtied.
8805 : *
8806 : * snapshotConflictHorizon comes from the largest xmin on the page being
8807 : * marked all-visible. REDO routine uses it to generate recovery conflicts.
8808 : *
8809 : * If checksums or wal_log_hints are enabled, we may also generate a full-page
8810 : * image of heap_buffer. Otherwise, we optimize away the FPI (by specifying
8811 : * REGBUF_NO_IMAGE for the heap buffer), in which case the caller should *not*
8812 : * update the heap page's LSN.
8813 : */
8814 : XLogRecPtr
8815 67046 : log_heap_visible(Relation rel, Buffer heap_buffer, Buffer vm_buffer,
8816 : TransactionId snapshotConflictHorizon, uint8 vmflags)
8817 : {
8818 : xl_heap_visible xlrec;
8819 : XLogRecPtr recptr;
8820 : uint8 flags;
8821 :
8822 : Assert(BufferIsValid(heap_buffer));
8823 : Assert(BufferIsValid(vm_buffer));
8824 :
8825 67046 : xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
8826 67046 : xlrec.flags = vmflags;
8827 67046 : if (RelationIsAccessibleInLogicalDecoding(rel))
8828 108 : xlrec.flags |= VISIBILITYMAP_XLOG_CATALOG_REL;
8829 67046 : XLogBeginInsert();
8830 67046 : XLogRegisterData(&xlrec, SizeOfHeapVisible);
8831 :
8832 67046 : XLogRegisterBuffer(0, vm_buffer, 0);
8833 :
8834 67046 : flags = REGBUF_STANDARD;
8835 67046 : if (!XLogHintBitIsNeeded())
8836 6144 : flags |= REGBUF_NO_IMAGE;
8837 67046 : XLogRegisterBuffer(1, heap_buffer, flags);
8838 :
8839 67046 : recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE);
8840 :
8841 67046 : return recptr;
8842 : }
8843 :
8844 : /*
8845 : * Perform XLogInsert for a heap-update operation. Caller must already
8846 : * have modified the buffer(s) and marked them dirty.
8847 : */
8848 : static XLogRecPtr
8849 586358 : log_heap_update(Relation reln, Buffer oldbuf,
8850 : Buffer newbuf, HeapTuple oldtup, HeapTuple newtup,
8851 : HeapTuple old_key_tuple,
8852 : bool all_visible_cleared, bool new_all_visible_cleared)
8853 : {
8854 : xl_heap_update xlrec;
8855 : xl_heap_header xlhdr;
8856 : xl_heap_header xlhdr_idx;
8857 : uint8 info;
8858 : uint16 prefix_suffix[2];
8859 586358 : uint16 prefixlen = 0,
8860 586358 : suffixlen = 0;
8861 : XLogRecPtr recptr;
8862 586358 : Page page = BufferGetPage(newbuf);
8863 586358 : bool need_tuple_data = RelationIsLogicallyLogged(reln);
8864 : bool init;
8865 : int bufflags;
8866 :
8867 : /* Caller should not call me on a non-WAL-logged relation */
8868 : Assert(RelationNeedsWAL(reln));
8869 :
8870 586358 : XLogBeginInsert();
8871 :
8872 586358 : if (HeapTupleIsHeapOnly(newtup))
8873 283564 : info = XLOG_HEAP_HOT_UPDATE;
8874 : else
8875 302794 : info = XLOG_HEAP_UPDATE;
8876 :
8877 : /*
8878 : * If the old and new tuple are on the same page, we only need to log the
8879 : * parts of the new tuple that were changed. That saves on the amount of
8880 : * WAL we need to write. Currently, we just count any unchanged bytes in
8881 : * the beginning and end of the tuple. That's quick to check, and
8882 : * perfectly covers the common case that only one field is updated.
8883 : *
8884 : * We could do this even if the old and new tuple are on different pages,
8885 : * but only if we don't make a full-page image of the old page, which is
8886 : * difficult to know in advance. Also, if the old tuple is corrupt for
8887 : * some reason, it would allow the corruption to propagate the new page,
8888 : * so it seems best to avoid. Under the general assumption that most
8889 : * updates tend to create the new tuple version on the same page, there
8890 : * isn't much to be gained by doing this across pages anyway.
8891 : *
8892 : * Skip this if we're taking a full-page image of the new page, as we
8893 : * don't include the new tuple in the WAL record in that case. Also
8894 : * disable if wal_level='logical', as logical decoding needs to be able to
8895 : * read the new tuple in whole from the WAL record alone.
8896 : */
8897 586358 : if (oldbuf == newbuf && !need_tuple_data &&
8898 283382 : !XLogCheckBufferNeedsBackup(newbuf))
8899 : {
8900 282388 : char *oldp = (char *) oldtup->t_data + oldtup->t_data->t_hoff;
8901 282388 : char *newp = (char *) newtup->t_data + newtup->t_data->t_hoff;
8902 282388 : int oldlen = oldtup->t_len - oldtup->t_data->t_hoff;
8903 282388 : int newlen = newtup->t_len - newtup->t_data->t_hoff;
8904 :
8905 : /* Check for common prefix between old and new tuple */
8906 23495156 : for (prefixlen = 0; prefixlen < Min(oldlen, newlen); prefixlen++)
8907 : {
8908 23443820 : if (newp[prefixlen] != oldp[prefixlen])
8909 231052 : break;
8910 : }
8911 :
8912 : /*
8913 : * Storing the length of the prefix takes 2 bytes, so we need to save
8914 : * at least 3 bytes or there's no point.
8915 : */
8916 282388 : if (prefixlen < 3)
8917 44170 : prefixlen = 0;
8918 :
8919 : /* Same for suffix */
8920 9012704 : for (suffixlen = 0; suffixlen < Min(oldlen, newlen) - prefixlen; suffixlen++)
8921 : {
8922 8960852 : if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
8923 230536 : break;
8924 : }
8925 282388 : if (suffixlen < 3)
8926 70678 : suffixlen = 0;
8927 : }
8928 :
8929 : /* Prepare main WAL data chain */
8930 586358 : xlrec.flags = 0;
8931 586358 : if (all_visible_cleared)
8932 3368 : xlrec.flags |= XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED;
8933 586358 : if (new_all_visible_cleared)
8934 1694 : xlrec.flags |= XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED;
8935 586358 : if (prefixlen > 0)
8936 238218 : xlrec.flags |= XLH_UPDATE_PREFIX_FROM_OLD;
8937 586358 : if (suffixlen > 0)
8938 211710 : xlrec.flags |= XLH_UPDATE_SUFFIX_FROM_OLD;
8939 586358 : if (need_tuple_data)
8940 : {
8941 94044 : xlrec.flags |= XLH_UPDATE_CONTAINS_NEW_TUPLE;
8942 94044 : if (old_key_tuple)
8943 : {
8944 292 : if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
8945 130 : xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_TUPLE;
8946 : else
8947 162 : xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_KEY;
8948 : }
8949 : }
8950 :
8951 : /* If new tuple is the single and first tuple on page... */
8952 593184 : if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
8953 6826 : PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
8954 : {
8955 6396 : info |= XLOG_HEAP_INIT_PAGE;
8956 6396 : init = true;
8957 : }
8958 : else
8959 579962 : init = false;
8960 :
8961 : /* Prepare WAL data for the old page */
8962 586358 : xlrec.old_offnum = ItemPointerGetOffsetNumber(&oldtup->t_self);
8963 586358 : xlrec.old_xmax = HeapTupleHeaderGetRawXmax(oldtup->t_data);
8964 1172716 : xlrec.old_infobits_set = compute_infobits(oldtup->t_data->t_infomask,
8965 586358 : oldtup->t_data->t_infomask2);
8966 :
8967 : /* Prepare WAL data for the new page */
8968 586358 : xlrec.new_offnum = ItemPointerGetOffsetNumber(&newtup->t_self);
8969 586358 : xlrec.new_xmax = HeapTupleHeaderGetRawXmax(newtup->t_data);
8970 :
8971 586358 : bufflags = REGBUF_STANDARD;
8972 586358 : if (init)
8973 6396 : bufflags |= REGBUF_WILL_INIT;
8974 586358 : if (need_tuple_data)
8975 94044 : bufflags |= REGBUF_KEEP_DATA;
8976 :
8977 586358 : XLogRegisterBuffer(0, newbuf, bufflags);
8978 586358 : if (oldbuf != newbuf)
8979 279092 : XLogRegisterBuffer(1, oldbuf, REGBUF_STANDARD);
8980 :
8981 586358 : XLogRegisterData(&xlrec, SizeOfHeapUpdate);
8982 :
8983 : /*
8984 : * Prepare WAL data for the new tuple.
8985 : */
8986 586358 : if (prefixlen > 0 || suffixlen > 0)
8987 : {
8988 281468 : if (prefixlen > 0 && suffixlen > 0)
8989 : {
8990 168460 : prefix_suffix[0] = prefixlen;
8991 168460 : prefix_suffix[1] = suffixlen;
8992 168460 : XLogRegisterBufData(0, &prefix_suffix, sizeof(uint16) * 2);
8993 : }
8994 113008 : else if (prefixlen > 0)
8995 : {
8996 69758 : XLogRegisterBufData(0, &prefixlen, sizeof(uint16));
8997 : }
8998 : else
8999 : {
9000 43250 : XLogRegisterBufData(0, &suffixlen, sizeof(uint16));
9001 : }
9002 : }
9003 :
9004 586358 : xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
9005 586358 : xlhdr.t_infomask = newtup->t_data->t_infomask;
9006 586358 : xlhdr.t_hoff = newtup->t_data->t_hoff;
9007 : Assert(SizeofHeapTupleHeader + prefixlen + suffixlen <= newtup->t_len);
9008 :
9009 : /*
9010 : * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
9011 : *
9012 : * The 'data' doesn't include the common prefix or suffix.
9013 : */
9014 586358 : XLogRegisterBufData(0, &xlhdr, SizeOfHeapHeader);
9015 586358 : if (prefixlen == 0)
9016 : {
9017 348140 : XLogRegisterBufData(0,
9018 348140 : (char *) newtup->t_data + SizeofHeapTupleHeader,
9019 348140 : newtup->t_len - SizeofHeapTupleHeader - suffixlen);
9020 : }
9021 : else
9022 : {
9023 : /*
9024 : * Have to write the null bitmap and data after the common prefix as
9025 : * two separate rdata entries.
9026 : */
9027 : /* bitmap [+ padding] [+ oid] */
9028 238218 : if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
9029 : {
9030 238218 : XLogRegisterBufData(0,
9031 238218 : (char *) newtup->t_data + SizeofHeapTupleHeader,
9032 238218 : newtup->t_data->t_hoff - SizeofHeapTupleHeader);
9033 : }
9034 :
9035 : /* data after common prefix */
9036 238218 : XLogRegisterBufData(0,
9037 238218 : (char *) newtup->t_data + newtup->t_data->t_hoff + prefixlen,
9038 238218 : newtup->t_len - newtup->t_data->t_hoff - prefixlen - suffixlen);
9039 : }
9040 :
9041 : /* We need to log a tuple identity */
9042 586358 : if (need_tuple_data && old_key_tuple)
9043 : {
9044 : /* don't really need this, but its more comfy to decode */
9045 292 : xlhdr_idx.t_infomask2 = old_key_tuple->t_data->t_infomask2;
9046 292 : xlhdr_idx.t_infomask = old_key_tuple->t_data->t_infomask;
9047 292 : xlhdr_idx.t_hoff = old_key_tuple->t_data->t_hoff;
9048 :
9049 292 : XLogRegisterData(&xlhdr_idx, SizeOfHeapHeader);
9050 :
9051 : /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
9052 292 : XLogRegisterData((char *) old_key_tuple->t_data + SizeofHeapTupleHeader,
9053 292 : old_key_tuple->t_len - SizeofHeapTupleHeader);
9054 : }
9055 :
9056 : /* filtering by origin on a row level is much more efficient */
9057 586358 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
9058 :
9059 586358 : recptr = XLogInsert(RM_HEAP_ID, info);
9060 :
9061 586358 : return recptr;
9062 : }
9063 :
9064 : /*
9065 : * Perform XLogInsert of an XLOG_HEAP2_NEW_CID record
9066 : *
9067 : * This is only used in wal_level >= WAL_LEVEL_LOGICAL, and only for catalog
9068 : * tuples.
9069 : */
9070 : static XLogRecPtr
9071 48894 : log_heap_new_cid(Relation relation, HeapTuple tup)
9072 : {
9073 : xl_heap_new_cid xlrec;
9074 :
9075 : XLogRecPtr recptr;
9076 48894 : HeapTupleHeader hdr = tup->t_data;
9077 :
9078 : Assert(ItemPointerIsValid(&tup->t_self));
9079 : Assert(tup->t_tableOid != InvalidOid);
9080 :
9081 48894 : xlrec.top_xid = GetTopTransactionId();
9082 48894 : xlrec.target_locator = relation->rd_locator;
9083 48894 : xlrec.target_tid = tup->t_self;
9084 :
9085 : /*
9086 : * If the tuple got inserted & deleted in the same TX we definitely have a
9087 : * combo CID, set cmin and cmax.
9088 : */
9089 48894 : if (hdr->t_infomask & HEAP_COMBOCID)
9090 : {
9091 : Assert(!(hdr->t_infomask & HEAP_XMAX_INVALID));
9092 : Assert(!HeapTupleHeaderXminInvalid(hdr));
9093 4048 : xlrec.cmin = HeapTupleHeaderGetCmin(hdr);
9094 4048 : xlrec.cmax = HeapTupleHeaderGetCmax(hdr);
9095 4048 : xlrec.combocid = HeapTupleHeaderGetRawCommandId(hdr);
9096 : }
9097 : /* No combo CID, so only cmin or cmax can be set by this TX */
9098 : else
9099 : {
9100 : /*
9101 : * Tuple inserted.
9102 : *
9103 : * We need to check for LOCK ONLY because multixacts might be
9104 : * transferred to the new tuple in case of FOR KEY SHARE updates in
9105 : * which case there will be an xmax, although the tuple just got
9106 : * inserted.
9107 : */
9108 58428 : if (hdr->t_infomask & HEAP_XMAX_INVALID ||
9109 13582 : HEAP_XMAX_IS_LOCKED_ONLY(hdr->t_infomask))
9110 : {
9111 31266 : xlrec.cmin = HeapTupleHeaderGetRawCommandId(hdr);
9112 31266 : xlrec.cmax = InvalidCommandId;
9113 : }
9114 : /* Tuple from a different tx updated or deleted. */
9115 : else
9116 : {
9117 13580 : xlrec.cmin = InvalidCommandId;
9118 13580 : xlrec.cmax = HeapTupleHeaderGetRawCommandId(hdr);
9119 : }
9120 44846 : xlrec.combocid = InvalidCommandId;
9121 : }
9122 :
9123 : /*
9124 : * Note that we don't need to register the buffer here, because this
9125 : * operation does not modify the page. The insert/update/delete that
9126 : * called us certainly did, but that's WAL-logged separately.
9127 : */
9128 48894 : XLogBeginInsert();
9129 48894 : XLogRegisterData(&xlrec, SizeOfHeapNewCid);
9130 :
9131 : /* will be looked at irrespective of origin */
9132 :
9133 48894 : recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_NEW_CID);
9134 :
9135 48894 : return recptr;
9136 : }
9137 :
9138 : /*
9139 : * Build a heap tuple representing the configured REPLICA IDENTITY to represent
9140 : * the old tuple in an UPDATE or DELETE.
9141 : *
9142 : * Returns NULL if there's no need to log an identity or if there's no suitable
9143 : * key defined.
9144 : *
9145 : * Pass key_required true if any replica identity columns changed value, or if
9146 : * any of them have any external data. Delete must always pass true.
9147 : *
9148 : * *copy is set to true if the returned tuple is a modified copy rather than
9149 : * the same tuple that was passed in.
9150 : */
9151 : static HeapTuple
9152 3597536 : ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
9153 : bool *copy)
9154 : {
9155 3597536 : TupleDesc desc = RelationGetDescr(relation);
9156 3597536 : char replident = relation->rd_rel->relreplident;
9157 : Bitmapset *idattrs;
9158 : HeapTuple key_tuple;
9159 : bool nulls[MaxHeapAttributeNumber];
9160 : Datum values[MaxHeapAttributeNumber];
9161 :
9162 3597536 : *copy = false;
9163 :
9164 3597536 : if (!RelationIsLogicallyLogged(relation))
9165 3396952 : return NULL;
9166 :
9167 200584 : if (replident == REPLICA_IDENTITY_NOTHING)
9168 462 : return NULL;
9169 :
9170 200122 : if (replident == REPLICA_IDENTITY_FULL)
9171 : {
9172 : /*
9173 : * When logging the entire old tuple, it very well could contain
9174 : * toasted columns. If so, force them to be inlined.
9175 : */
9176 394 : if (HeapTupleHasExternal(tp))
9177 : {
9178 8 : *copy = true;
9179 8 : tp = toast_flatten_tuple(tp, desc);
9180 : }
9181 394 : return tp;
9182 : }
9183 :
9184 : /* if the key isn't required and we're only logging the key, we're done */
9185 199728 : if (!key_required)
9186 93752 : return NULL;
9187 :
9188 : /* find out the replica identity columns */
9189 105976 : idattrs = RelationGetIndexAttrBitmap(relation,
9190 : INDEX_ATTR_BITMAP_IDENTITY_KEY);
9191 :
9192 : /*
9193 : * If there's no defined replica identity columns, treat as !key_required.
9194 : * (This case should not be reachable from heap_update, since that should
9195 : * calculate key_required accurately. But heap_delete just passes
9196 : * constant true for key_required, so we can hit this case in deletes.)
9197 : */
9198 105976 : if (bms_is_empty(idattrs))
9199 12042 : return NULL;
9200 :
9201 : /*
9202 : * Construct a new tuple containing only the replica identity columns,
9203 : * with nulls elsewhere. While we're at it, assert that the replica
9204 : * identity columns aren't null.
9205 : */
9206 93934 : heap_deform_tuple(tp, desc, values, nulls);
9207 :
9208 301790 : for (int i = 0; i < desc->natts; i++)
9209 : {
9210 207856 : if (bms_is_member(i + 1 - FirstLowInvalidHeapAttributeNumber,
9211 : idattrs))
9212 : Assert(!nulls[i]);
9213 : else
9214 113898 : nulls[i] = true;
9215 : }
9216 :
9217 93934 : key_tuple = heap_form_tuple(desc, values, nulls);
9218 93934 : *copy = true;
9219 :
9220 93934 : bms_free(idattrs);
9221 :
9222 : /*
9223 : * If the tuple, which by here only contains indexed columns, still has
9224 : * toasted columns, force them to be inlined. This is somewhat unlikely
9225 : * since there's limits on the size of indexed columns, so we don't
9226 : * duplicate toast_flatten_tuple()s functionality in the above loop over
9227 : * the indexed columns, even if it would be more efficient.
9228 : */
9229 93934 : if (HeapTupleHasExternal(key_tuple))
9230 : {
9231 8 : HeapTuple oldtup = key_tuple;
9232 :
9233 8 : key_tuple = toast_flatten_tuple(oldtup, desc);
9234 8 : heap_freetuple(oldtup);
9235 : }
9236 :
9237 93934 : return key_tuple;
9238 : }
9239 :
9240 : /*
9241 : * HeapCheckForSerializableConflictOut
9242 : * We are reading a tuple. If it's not visible, there may be a
9243 : * rw-conflict out with the inserter. Otherwise, if it is visible to us
9244 : * but has been deleted, there may be a rw-conflict out with the deleter.
9245 : *
9246 : * We will determine the top level xid of the writing transaction with which
9247 : * we may be in conflict, and ask CheckForSerializableConflictOut() to check
9248 : * for overlap with our own transaction.
9249 : *
9250 : * This function should be called just about anywhere in heapam.c where a
9251 : * tuple has been read. The caller must hold at least a shared lock on the
9252 : * buffer, because this function might set hint bits on the tuple. There is
9253 : * currently no known reason to call this function from an index AM.
9254 : */
9255 : void
9256 62974488 : HeapCheckForSerializableConflictOut(bool visible, Relation relation,
9257 : HeapTuple tuple, Buffer buffer,
9258 : Snapshot snapshot)
9259 : {
9260 : TransactionId xid;
9261 : HTSV_Result htsvResult;
9262 :
9263 62974488 : if (!CheckForSerializableConflictOutNeeded(relation, snapshot))
9264 62923764 : return;
9265 :
9266 : /*
9267 : * Check to see whether the tuple has been written to by a concurrent
9268 : * transaction, either to create it not visible to us, or to delete it
9269 : * while it is visible to us. The "visible" bool indicates whether the
9270 : * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
9271 : * is going on with it.
9272 : *
9273 : * In the event of a concurrently inserted tuple that also happens to have
9274 : * been concurrently updated (by a separate transaction), the xmin of the
9275 : * tuple will be used -- not the updater's xid.
9276 : */
9277 50724 : htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
9278 50724 : switch (htsvResult)
9279 : {
9280 49098 : case HEAPTUPLE_LIVE:
9281 49098 : if (visible)
9282 49072 : return;
9283 26 : xid = HeapTupleHeaderGetXmin(tuple->t_data);
9284 26 : break;
9285 722 : case HEAPTUPLE_RECENTLY_DEAD:
9286 : case HEAPTUPLE_DELETE_IN_PROGRESS:
9287 722 : if (visible)
9288 570 : xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
9289 : else
9290 152 : xid = HeapTupleHeaderGetXmin(tuple->t_data);
9291 :
9292 722 : if (TransactionIdPrecedes(xid, TransactionXmin))
9293 : {
9294 : /* This is like the HEAPTUPLE_DEAD case */
9295 : Assert(!visible);
9296 134 : return;
9297 : }
9298 588 : break;
9299 656 : case HEAPTUPLE_INSERT_IN_PROGRESS:
9300 656 : xid = HeapTupleHeaderGetXmin(tuple->t_data);
9301 656 : break;
9302 248 : case HEAPTUPLE_DEAD:
9303 : Assert(!visible);
9304 248 : return;
9305 0 : default:
9306 :
9307 : /*
9308 : * The only way to get to this default clause is if a new value is
9309 : * added to the enum type without adding it to this switch
9310 : * statement. That's a bug, so elog.
9311 : */
9312 0 : elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
9313 :
9314 : /*
9315 : * In spite of having all enum values covered and calling elog on
9316 : * this default, some compilers think this is a code path which
9317 : * allows xid to be used below without initialization. Silence
9318 : * that warning.
9319 : */
9320 : xid = InvalidTransactionId;
9321 : }
9322 :
9323 : Assert(TransactionIdIsValid(xid));
9324 : Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
9325 :
9326 : /*
9327 : * Find top level xid. Bail out if xid is too early to be a conflict, or
9328 : * if it's our own xid.
9329 : */
9330 1270 : if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
9331 128 : return;
9332 1142 : xid = SubTransGetTopmostTransaction(xid);
9333 1142 : if (TransactionIdPrecedes(xid, TransactionXmin))
9334 0 : return;
9335 :
9336 1142 : CheckForSerializableConflictOut(relation, xid, snapshot);
9337 : }
|