Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * heapam.h
4 : * POSTGRES heap access method definitions.
5 : *
6 : *
7 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
8 : * Portions Copyright (c) 1994, Regents of the University of California
9 : *
10 : * src/include/access/heapam.h
11 : *
12 : *-------------------------------------------------------------------------
13 : */
14 : #ifndef HEAPAM_H
15 : #define HEAPAM_H
16 :
17 : #include "access/heapam_xlog.h"
18 : #include "access/relation.h" /* for backward compatibility */
19 : #include "access/relscan.h"
20 : #include "access/sdir.h"
21 : #include "access/skey.h"
22 : #include "access/table.h" /* for backward compatibility */
23 : #include "access/tableam.h"
24 : #include "commands/vacuum.h"
25 : #include "nodes/lockoptions.h"
26 : #include "nodes/primnodes.h"
27 : #include "storage/bufpage.h"
28 : #include "storage/dsm.h"
29 : #include "storage/lockdefs.h"
30 : #include "storage/read_stream.h"
31 : #include "storage/shm_toc.h"
32 : #include "utils/relcache.h"
33 : #include "utils/snapshot.h"
34 :
35 :
/* "options" flag bits for heap_insert */
#define HEAP_INSERT_SKIP_FSM	TABLE_INSERT_SKIP_FSM
#define HEAP_INSERT_FROZEN		TABLE_INSERT_FROZEN
#define HEAP_INSERT_NO_LOGICAL	TABLE_INSERT_NO_LOGICAL
/* heap-AM-specific flag, defined here rather than aliased from tableam.h */
#define HEAP_INSERT_SPECULATIVE 0x0010

/* "options" flag bits for heap_page_prune_and_freeze */
#define HEAP_PAGE_PRUNE_MARK_UNUSED_NOW		(1 << 0)
#define HEAP_PAGE_PRUNE_FREEZE				(1 << 1)
#define HEAP_PAGE_PRUNE_ALLOW_FAST_PATH		(1 << 2)

/* opaque/forward declarations; full definitions live elsewhere */
typedef struct BulkInsertStateData *BulkInsertState;
typedef struct GlobalVisState GlobalVisState;
typedef struct TupleTableSlot TupleTableSlot;
struct VacuumCutoffs;

/* highest-numbered tuple lock mode (presumably; see nodes/lockoptions.h) */
#define MaxLockTupleMode	LockTupleExclusive
53 :
/*
 * Descriptor for heap table scans.
 *
 * Extends TableScanDescData (rs_base must stay first so a HeapScanDesc can
 * be used where a TableScanDesc is expected).
 */
typedef struct HeapScanDescData
{
	TableScanDescData rs_base;	/* AM independent part of the descriptor */

	/* state set up at initscan time */
	BlockNumber rs_nblocks;		/* total number of blocks in rel */
	BlockNumber rs_startblock;	/* block # to start at */
	BlockNumber rs_numblocks;	/* max number of blocks to scan */
	/* rs_numblocks is usually InvalidBlockNumber, meaning "scan whole rel" */

	/* scan current state */
	bool		rs_inited;		/* false = scan not init'd yet */
	OffsetNumber rs_coffset;	/* current offset # in non-page-at-a-time mode */
	BlockNumber rs_cblock;		/* current block # in scan, if any */
	Buffer		rs_cbuf;		/* current buffer in scan, if any */
	/* NB: if rs_cbuf is not InvalidBuffer, we hold a pin on that buffer */

	BufferAccessStrategy rs_strategy;	/* access strategy for reads */

	HeapTupleData rs_ctup;		/* current tuple in scan, if any */

	/* For scans that stream reads */
	ReadStream *rs_read_stream;

	/*
	 * For sequential scans and TID range scans to stream reads.  The read
	 * stream is allocated at the beginning of the scan and reset on rescan or
	 * when the scan direction changes.  The scan direction is saved each time
	 * a new page is requested.  If the scan direction changes from one page to
	 * the next, the read stream releases all previously pinned buffers and
	 * resets the prefetch block.
	 */
	ScanDirection rs_dir;		/* direction saved at last page request */
	BlockNumber rs_prefetch_block;

	/*
	 * For parallel scans to store page allocation data.  NULL when not
	 * performing a parallel scan.
	 */
	ParallelBlockTableScanWorkerData *rs_parallelworkerdata;

	/*
	 * For sequential scans and bitmap heap scans.  The current heap block's
	 * corresponding page in the visibility map.
	 */
	Buffer		rs_vmbuffer;

	/* these fields only used in page-at-a-time mode and for bitmap scans */
	uint32		rs_cindex;		/* current tuple's index in vistuples */
	uint32		rs_ntuples;		/* number of visible tuples on page */
	OffsetNumber rs_vistuples[MaxHeapTuplesPerPage];	/* their offsets */
} HeapScanDescData;
typedef struct HeapScanDescData *HeapScanDesc;
110 :
/*
 * Descriptor for bitmap heap scans.  Currently just wraps a plain heap scan
 * descriptor; rs_heap_base must stay first so the two are interchangeable.
 */
typedef struct BitmapHeapScanDescData
{
	HeapScanDescData rs_heap_base;

	/* Holds no data */
} BitmapHeapScanDescData;
typedef struct BitmapHeapScanDescData *BitmapHeapScanDesc;
118 :
/*
 * Descriptor for fetches from heap via an index.
 *
 * Extends IndexFetchTableData (xs_base must stay first).
 */
typedef struct IndexFetchHeapData
{
	IndexFetchTableData xs_base;	/* AM independent part of the descriptor */

	/*
	 * Current heap buffer in scan, if any.  NB: if xs_cbuf is not
	 * InvalidBuffer, we hold a pin on that buffer.
	 */
	Buffer		xs_cbuf;

	/* Current heap block's corresponding page in the visibility map */
	Buffer		xs_vmbuffer;
} IndexFetchHeapData;
135 :
/* Result codes for HeapTupleSatisfiesVacuum and HeapTupleSatisfiesVacuumHorizon */
typedef enum
{
	HEAPTUPLE_DEAD,				/* tuple is dead and deletable */
	HEAPTUPLE_LIVE,				/* tuple is live (committed, no deleter) */
	HEAPTUPLE_RECENTLY_DEAD,	/* tuple is dead, but not deletable yet */
	HEAPTUPLE_INSERT_IN_PROGRESS,	/* inserting xact is still in progress */
	HEAPTUPLE_DELETE_IN_PROGRESS,	/* deleting xact is still in progress */
} HTSV_Result;
145 :
/*
 * heap_prepare_freeze_tuple may request that heap_freeze_execute_prepared
 * check any tuple's to-be-frozen xmin and/or xmax status using pg_xact
 */
#define		HEAP_FREEZE_CHECK_XMIN_COMMITTED	0x01
#define		HEAP_FREEZE_CHECK_XMAX_ABORTED		0x02

/*
 * heap_prepare_freeze_tuple state describing how to freeze a tuple.
 *
 * One of these is prepared per tuple; heap_execute_freeze_tuple (below)
 * applies it by installing xmax and the two infomask words into the tuple
 * header.
 */
typedef struct HeapTupleFreeze
{
	/* Fields describing how to process tuple */
	TransactionId xmax;			/* new xmax value to install */
	uint16		t_infomask2;	/* new t_infomask2 word to install */
	uint16		t_infomask;		/* new t_infomask word to install */
	uint8		frzflags;		/* XLH_FREEZE_XVAC / XLH_INVALID_XVAC etc. */

	/* xmin/xmax check flags (HEAP_FREEZE_CHECK_* bits above) */
	uint8		checkflags;
	/* Page offset number for tuple */
	OffsetNumber offset;
} HeapTupleFreeze;
167 :
/*
 * State used by VACUUM to track the details of freezing all eligible tuples
 * on a given heap page.
 *
 * VACUUM prepares freeze plans for each page via heap_prepare_freeze_tuple
 * calls (every tuple with storage gets its own call).  This page-level freeze
 * state is updated across each call, which ultimately determines whether or
 * not freezing the page is required.
 *
 * Aside from the basic question of whether or not freezing will go ahead, the
 * state also tracks the oldest extant XID/MXID in the table as a whole, for
 * the purposes of advancing relfrozenxid/relminmxid values in pg_class later
 * on.  Each heap_prepare_freeze_tuple call pushes NewRelfrozenXid and/or
 * NewRelminMxid back as required to avoid unsafe final pg_class values.  Any
 * and all unfrozen XIDs or MXIDs that remain after VACUUM finishes _must_
 * have values >= the final relfrozenxid/relminmxid values in pg_class.  This
 * includes XIDs that remain as MultiXact members from any tuple's xmax.
 *
 * When 'freeze_required' flag isn't set after all tuples are examined, the
 * final choice on freezing is made by vacuumlazy.c.  It can decide to trigger
 * freezing based on whatever criteria it deems appropriate.  However, it is
 * recommended that vacuumlazy.c avoid early freezing when freezing does not
 * enable setting the target page all-frozen in the visibility map afterwards.
 */
typedef struct HeapPageFreeze
{
	/* Is heap_prepare_freeze_tuple caller required to freeze page? */
	bool		freeze_required;

	/*
	 * "Freeze" NewRelfrozenXid/NewRelminMxid trackers.
	 *
	 * Trackers used when heap_freeze_execute_prepared freezes, or when there
	 * are zero freeze plans for a page.  It is always valid for vacuumlazy.c
	 * to freeze any page, by definition.  This even includes pages that have
	 * no tuples with storage to consider in the first place.  That way the
	 * 'totally_frozen' results from heap_prepare_freeze_tuple can always be
	 * used in the same way, even when no freeze plans need to be executed to
	 * "freeze the page".  Only the "freeze" path needs to consider the need
	 * to set pages all-frozen in the visibility map under this scheme.
	 *
	 * When we freeze a page, we generally freeze all XIDs < OldestXmin, only
	 * leaving behind XIDs that are ineligible for freezing, if any.  And so
	 * you might wonder why these trackers are necessary at all; why should
	 * _any_ page that VACUUM freezes _ever_ be left with XIDs/MXIDs that
	 * ratchet back the top-level NewRelfrozenXid/NewRelminMxid trackers?
	 *
	 * It is useful to use a definition of "freeze the page" that does not
	 * overspecify how MultiXacts are affected.  heap_prepare_freeze_tuple
	 * generally prefers to remove Multis eagerly, but lazy processing is used
	 * in cases where laziness allows VACUUM to avoid allocating a new Multi.
	 * The "freeze the page" trackers enable this flexibility.
	 */
	TransactionId FreezePageRelfrozenXid;
	MultiXactId FreezePageRelminMxid;

	/*
	 * Newest XID that this page's freeze actions will remove from tuple
	 * visibility metadata (currently xmin and/or xvac).  It is used to derive
	 * the snapshot conflict horizon for a WAL record that freezes tuples.  On
	 * a standby, we must not replay that change while any snapshot could
	 * still treat that XID as running.
	 *
	 * It's only used if we execute freeze plans for this page, so there is no
	 * corresponding "no freeze" tracker.
	 */
	TransactionId FreezePageConflictXid;

	/*
	 * "No freeze" NewRelfrozenXid/NewRelminMxid trackers.
	 *
	 * These trackers are maintained in the same way as the trackers used when
	 * VACUUM scans a page that isn't cleanup locked.  Both code paths are
	 * based on the same general idea (do less work for this page during the
	 * ongoing VACUUM, at the cost of having to accept older final values).
	 */
	TransactionId NoFreezePageRelfrozenXid;
	MultiXactId NoFreezePageRelminMxid;

} HeapPageFreeze;
248 :
249 :
/* 'reason' codes for heap_page_prune_and_freeze() */
typedef enum
{
	PRUNE_ON_ACCESS,			/* on-access pruning */
	PRUNE_VACUUM_SCAN,			/* VACUUM 1st heap pass */
	PRUNE_VACUUM_CLEANUP,		/* VACUUM 2nd heap pass */
} PruneReason;
257 :
/*
 * Input parameters to heap_page_prune_and_freeze()
 */
typedef struct PruneFreezeParams
{
	Relation	relation;		/* relation containing buffer to be pruned */
	Buffer		buffer;			/* buffer to be pruned */

	/*
	 * Callers should provide a pinned vmbuffer corresponding to the heap
	 * block in buffer.  We will check for and repair any corruption in the VM.
	 */
	Buffer		vmbuffer;

	/*
	 * The reason pruning was performed.  It is used to set the WAL record
	 * opcode which is used for debugging and analysis purposes.
	 */
	PruneReason reason;

	/*
	 * Contains flag bits:
	 *
	 * HEAP_PAGE_PRUNE_MARK_UNUSED_NOW indicates that dead items can be set
	 * LP_UNUSED during pruning.
	 *
	 * HEAP_PAGE_PRUNE_FREEZE indicates that we will also freeze tuples, and
	 * will return 'all_visible', 'all_frozen' flags to the caller.
	 *
	 * NOTE(review): HEAP_PAGE_PRUNE_ALLOW_FAST_PATH is also defined above but
	 * not described here — see pruneheap.c for its semantics.
	 */
	int			options;

	/*
	 * vistest is used to distinguish whether tuples are DEAD or RECENTLY_DEAD
	 * (see heap_prune_satisfies_vacuum).
	 */
	GlobalVisState *vistest;

	/*
	 * Contains the cutoffs used for freezing.  They are required if the
	 * HEAP_PAGE_PRUNE_FREEZE option is set.  cutoffs->OldestXmin is also used
	 * to determine if dead tuples are HEAPTUPLE_RECENTLY_DEAD or
	 * HEAPTUPLE_DEAD.  Currently only vacuum passes in cutoffs.  Vacuum
	 * calculates them once, at the beginning of vacuuming the relation.
	 */
	struct VacuumCutoffs *cutoffs;
} PruneFreezeParams;
304 :
/*
 * Per-page state returned by heap_page_prune_and_freeze()
 */
typedef struct PruneFreezeResult
{
	int			ndeleted;		/* Number of tuples deleted from the page */
	int			nnewlpdead;		/* Number of newly LP_DEAD items */
	int			nfrozen;		/* Number of tuples we froze */

	/* Number of live and recently dead tuples on the page, after pruning */
	int			live_tuples;
	int			recently_dead_tuples;

	/*
	 * set_all_visible and set_all_frozen indicate if the all-visible and
	 * all-frozen bits in the visibility map should be set for this page after
	 * pruning.
	 *
	 * vm_conflict_horizon is the newest xmin of live tuples on the page.  The
	 * caller can use it as the conflict horizon when setting the VM bits.  It
	 * is only valid if we froze some tuples (nfrozen > 0), and set_all_frozen
	 * is true.
	 *
	 * These are only set if the HEAP_PAGE_PRUNE_FREEZE option is set.
	 */
	bool		set_all_visible;
	bool		set_all_frozen;
	TransactionId vm_conflict_horizon;

	/*
	 * The value of the vmbuffer's vmbits at the beginning of pruning.  It is
	 * cleared if VM corruption is found and corrected.
	 */
	uint8		old_vmbits;

	/*
	 * Whether or not the page makes rel truncation unsafe.  This is set to
	 * 'true', even if the page contains LP_DEAD items.  VACUUM will remove
	 * them before attempting to truncate.
	 */
	bool		hastup;

	/*
	 * LP_DEAD items on the page after pruning.  Includes existing LP_DEAD
	 * items.
	 */
	int			lpdead_items;
	OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
} PruneFreezeResult;
354 :
355 :
356 : /* ----------------
357 : * function prototypes for heap access method
358 : *
359 : * heap_create, heap_create_with_catalog, and heap_drop_with_catalog
360 : * are declared in catalog/heap.h
361 : * ----------------
362 : */
363 :
364 :
365 : extern TableScanDesc heap_beginscan(Relation relation, Snapshot snapshot,
366 : int nkeys, ScanKey key,
367 : ParallelTableScanDesc parallel_scan,
368 : uint32 flags);
369 : extern void heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk,
370 : BlockNumber numBlks);
371 : extern void heap_prepare_pagescan(TableScanDesc sscan);
372 : extern void heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
373 : bool allow_strat, bool allow_sync, bool allow_pagemode);
374 : extern void heap_endscan(TableScanDesc sscan);
375 : extern HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction);
376 : extern bool heap_getnextslot(TableScanDesc sscan,
377 : ScanDirection direction, TupleTableSlot *slot);
378 : extern void heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
379 : ItemPointer maxtid);
380 : extern bool heap_getnextslot_tidrange(TableScanDesc sscan,
381 : ScanDirection direction,
382 : TupleTableSlot *slot);
383 : extern bool heap_fetch(Relation relation, Snapshot snapshot,
384 : HeapTuple tuple, Buffer *userbuf, bool keep_buf);
385 : extern bool heap_hot_search_buffer(ItemPointer tid, Relation relation,
386 : Buffer buffer, Snapshot snapshot, HeapTuple heapTuple,
387 : bool *all_dead, bool first_call);
388 :
389 : extern void heap_get_latest_tid(TableScanDesc sscan, ItemPointer tid);
390 :
391 : extern BulkInsertState GetBulkInsertState(void);
392 : extern void FreeBulkInsertState(BulkInsertState);
393 : extern void ReleaseBulkInsertStatePin(BulkInsertState bistate);
394 :
395 : extern void heap_insert(Relation relation, HeapTuple tup, CommandId cid,
396 : int options, BulkInsertState bistate);
397 : extern void heap_multi_insert(Relation relation, TupleTableSlot **slots,
398 : int ntuples, CommandId cid, int options,
399 : BulkInsertState bistate);
400 : extern TM_Result heap_delete(Relation relation, const ItemPointerData *tid,
401 : CommandId cid, Snapshot crosscheck, bool wait,
402 : TM_FailureData *tmfd, bool changingPart);
403 : extern void heap_finish_speculative(Relation relation, const ItemPointerData *tid);
404 : extern void heap_abort_speculative(Relation relation, const ItemPointerData *tid);
405 : extern TM_Result heap_update(Relation relation, const ItemPointerData *otid,
406 : HeapTuple newtup,
407 : CommandId cid, Snapshot crosscheck, bool wait,
408 : TM_FailureData *tmfd, LockTupleMode *lockmode,
409 : TU_UpdateIndexes *update_indexes);
410 : extern TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
411 : CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
412 : bool follow_updates,
413 : Buffer *buffer, TM_FailureData *tmfd);
414 :
415 : extern bool heap_inplace_lock(Relation relation,
416 : HeapTuple oldtup_ptr, Buffer buffer,
417 : void (*release_callback) (void *), void *arg);
418 : extern void heap_inplace_update_and_unlock(Relation relation,
419 : HeapTuple oldtup, HeapTuple tuple,
420 : Buffer buffer);
421 : extern void heap_inplace_unlock(Relation relation,
422 : HeapTuple oldtup, Buffer buffer);
423 : extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
424 : const struct VacuumCutoffs *cutoffs,
425 : HeapPageFreeze *pagefrz,
426 : HeapTupleFreeze *frz, bool *totally_frozen);
427 :
428 : extern void heap_pre_freeze_checks(Buffer buffer,
429 : HeapTupleFreeze *tuples, int ntuples);
430 : extern void heap_freeze_prepared_tuples(Buffer buffer,
431 : HeapTupleFreeze *tuples, int ntuples);
432 : extern bool heap_freeze_tuple(HeapTupleHeader tuple,
433 : TransactionId relfrozenxid, TransactionId relminmxid,
434 : TransactionId FreezeLimit, TransactionId MultiXactCutoff);
435 : extern bool heap_tuple_should_freeze(HeapTupleHeader tuple,
436 : const struct VacuumCutoffs *cutoffs,
437 : TransactionId *NoFreezePageRelfrozenXid,
438 : MultiXactId *NoFreezePageRelminMxid);
439 : extern bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple);
440 :
441 : extern void simple_heap_insert(Relation relation, HeapTuple tup);
442 : extern void simple_heap_delete(Relation relation, const ItemPointerData *tid);
443 : extern void simple_heap_update(Relation relation, const ItemPointerData *otid,
444 : HeapTuple tup, TU_UpdateIndexes *update_indexes);
445 :
446 : extern TransactionId heap_index_delete_tuples(Relation rel,
447 : TM_IndexDeleteOp *delstate);
448 :
449 : /* in heap/pruneheap.c */
450 : extern void heap_page_prune_opt(Relation relation, Buffer buffer,
451 : Buffer *vmbuffer);
452 : extern void heap_page_prune_and_freeze(PruneFreezeParams *params,
453 : PruneFreezeResult *presult,
454 : OffsetNumber *off_loc,
455 : TransactionId *new_relfrozen_xid,
456 : MultiXactId *new_relmin_mxid);
457 : extern void heap_page_prune_execute(Buffer buffer, bool lp_truncate_only,
458 : OffsetNumber *redirected, int nredirected,
459 : OffsetNumber *nowdead, int ndead,
460 : OffsetNumber *nowunused, int nunused);
461 : extern void heap_get_root_tuples(Page page, OffsetNumber *root_offsets);
462 : extern void log_heap_prune_and_freeze(Relation relation, Buffer buffer,
463 : Buffer vmbuffer, uint8 vmflags,
464 : TransactionId conflict_xid,
465 : bool cleanup_lock,
466 : PruneReason reason,
467 : HeapTupleFreeze *frozen, int nfrozen,
468 : OffsetNumber *redirected, int nredirected,
469 : OffsetNumber *dead, int ndead,
470 : OffsetNumber *unused, int nunused);
471 :
472 : /* in heap/vacuumlazy.c */
473 : extern void heap_vacuum_rel(Relation rel,
474 : const VacuumParams params, BufferAccessStrategy bstrategy);
475 :
476 : /* in heap/heapam_visibility.c */
477 : extern bool HeapTupleSatisfiesVisibility(HeapTuple htup, Snapshot snapshot,
478 : Buffer buffer);
479 : extern TM_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid,
480 : Buffer buffer);
481 : extern HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin,
482 : Buffer buffer);
483 : extern HTSV_Result HeapTupleSatisfiesVacuumHorizon(HeapTuple htup, Buffer buffer,
484 : TransactionId *dead_after);
485 : extern void HeapTupleSetHintBits(HeapTupleHeader tuple, Buffer buffer,
486 : uint16 infomask, TransactionId xid);
487 : extern bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple);
488 : extern bool HeapTupleIsSurelyDead(HeapTuple htup,
489 : GlobalVisState *vistest);
490 :
/*
 * Some of the input/output to/from HeapTupleSatisfiesMVCCBatch() is passed
 * via this struct, as otherwise the increased number of arguments to
 * HeapTupleSatisfiesMVCCBatch() leads to on-stack argument passing on x86-64,
 * which causes a small regression.
 */
typedef struct BatchMVCCState
{
	HeapTupleData tuples[MaxHeapTuplesPerPage]; /* tuples to check, per page */
	bool		visible[MaxHeapTuplesPerPage];	/* per-tuple visibility result */
} BatchMVCCState;
502 :
503 : extern int HeapTupleSatisfiesMVCCBatch(Snapshot snapshot, Buffer buffer,
504 : int ntups,
505 : BatchMVCCState *batchmvcc,
506 : OffsetNumber *vistuples_dense);
507 :
508 : /*
509 : * To avoid leaking too much knowledge about reorderbuffer implementation
510 : * details this is implemented in reorderbuffer.c not heapam_visibility.c
511 : */
512 : struct HTAB;
513 : extern bool ResolveCminCmaxDuringDecoding(struct HTAB *tuplecid_data,
514 : Snapshot snapshot,
515 : HeapTuple htup,
516 : Buffer buffer,
517 : CommandId *cmin, CommandId *cmax);
518 : extern void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple,
519 : Buffer buffer, Snapshot snapshot);
520 :
/*
 * heap_execute_freeze_tuple
 *		Execute the prepared freezing of a tuple with caller's freeze plan.
 *
 * Applies the plan built by heap_prepare_freeze_tuple: installs the new
 * xmax, performs any requested xvac adjustment, and overwrites both infomask
 * words with the precomputed values from 'frz'.
 *
 * Caller is responsible for ensuring that no other backend can access the
 * storage underlying this tuple, either by holding an exclusive lock on the
 * buffer containing it (which is what lazy VACUUM does), or by having it be
 * in private storage (which is what CLUSTER and friends do).
 */
static inline void
heap_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz)
{
	HeapTupleHeaderSetXmax(tuple, frz->xmax);

	/* xvac adjustments requested by the freeze plan, if any */
	if (frz->frzflags & XLH_FREEZE_XVAC)
		HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);

	if (frz->frzflags & XLH_INVALID_XVAC)
		HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);

	/*
	 * Install the precomputed infomask words last, so the setters above see
	 * the tuple's pre-freeze state.  NOTE(review): assumes the Set* macros
	 * may consult t_infomask — preserve this ordering.
	 */
	tuple->t_infomask = frz->t_infomask;
	tuple->t_infomask2 = frz->t_infomask2;
}
544 :
545 : #endif /* HEAPAM_H */
|