Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * visibilitymap.c
4 : * bitmap for tracking visibility of heap tuples
5 : *
6 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/access/heap/visibilitymap.c
12 : *
13 : * INTERFACE ROUTINES
14 : * visibilitymap_clear - clear bits for one page in the visibility map
15 : * visibilitymap_pin - pin a map page for setting a bit
16 : * visibilitymap_pin_ok - check whether correct map page is already pinned
17 : * visibilitymap_set - set bit(s) in a previously pinned page and log
18 : * visibilitymap_set_vmbits - set bit(s) in a pinned page
19 : * visibilitymap_get_status - get status of bits
20 : * visibilitymap_count - count number of bits set in visibility map
21 : * visibilitymap_prepare_truncate -
22 : * prepare for truncation of the visibility map
23 : *
24 : * NOTES
25 : *
26 : * The visibility map is a bitmap with two bits (all-visible and all-frozen)
27 : * per heap page. A set all-visible bit means that all tuples on the page are
28 : * known visible to all transactions, and therefore the page doesn't need to
29 : * be vacuumed. A set all-frozen bit means that all tuples on the page are
30 : * completely frozen, and therefore the page doesn't need to be vacuumed even
31 : * if a whole-table scanning vacuum is required (e.g., anti-wraparound vacuum).
32 : * The all-frozen bit must be set only when the page is already all-visible.
33 : *
34 : * The map is conservative in the sense that we make sure that whenever a bit
35 : * is set, we know the condition is true, but if a bit is not set, it might or
36 : * might not be true.
37 : *
38 : * Clearing visibility map bits is not separately WAL-logged. The callers
39 : * must make sure that whenever a bit is cleared, the bit is cleared on WAL
40 : * replay of the updating operation as well.
41 : *
42 : * When we *set* a visibility map bit during VACUUM, we must write WAL. This may
43 : * seem counterintuitive, since the bit is basically a hint: if it is clear,
44 : * it may still be the case that every tuple on the page is visible to all
45 : * transactions; we just don't know that for certain. The difficulty is that
46 : * there are two bits which are typically set together: the PD_ALL_VISIBLE bit
47 : * on the page itself, and the visibility map bit. If a crash occurs after the
48 : * visibility map page makes it to disk and before the updated heap page makes
49 : * it to disk, redo must set the bit on the heap page. Otherwise, the next
50 : * insert, update, or delete on the heap page will fail to realize that the
51 : * visibility map bit must be cleared, possibly causing index-only scans to
52 : * return wrong answers.
53 : *
54 : * VACUUM will normally skip pages for which the visibility map bit is set;
55 : * such pages can't contain any dead tuples and therefore don't need vacuuming.
56 : *
57 : * LOCKING
58 : *
59 : * In heapam.c, whenever a page is modified so that not all tuples on the
60 : * page are visible to everyone anymore, the corresponding bit in the
61 : * visibility map is cleared. In order to be crash-safe, we need to do this
62 : * while still holding a lock on the heap page and in the same critical
63 : * section that logs the page modification. However, we don't want to hold
64 : * the buffer lock over any I/O that may be required to read in the visibility
65 : * map page. To avoid this, we examine the heap page before locking it;
66 : * if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map
67 : * page. Then, we lock the buffer. But this creates a race condition: there
68 : * is a possibility that in the time it takes to lock the buffer, the
69 : * PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the
70 : * buffer, pin the visibility map page, and relock the buffer. This shouldn't
71 : * happen often, because only VACUUM currently sets visibility map bits,
72 : * and the race will only occur if VACUUM processes a given page at almost
73 : * exactly the same time that someone tries to further modify it.
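 *
 * Schematically, the bit-clearing path in heapam.c looks roughly like this
 * (a simplified sketch, not the exact code):
 *
 *     if (PageIsAllVisible(page))                 <-- checked without lock
 *         visibilitymap_pin(rel, block, &vmbuf);  <-- may do I/O
 *     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 *     if (PageIsAllVisible(page) && !visibilitymap_pin_ok(block, vmbuf))
 *     {
 *         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);  <-- lost the race:
 *         visibilitymap_pin(rel, block, &vmbuf);       unlock, pin, relock
 *         LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 *     }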
74 : *
75 : * To set a bit, you need to hold a lock on the heap page. That prevents
76 : * the race condition where VACUUM sees that all tuples on the page are
77 : * visible to everyone, but another backend modifies the page before VACUUM
78 : * sets the bit in the visibility map.
79 : *
80 : * When a bit is set, the LSN of the visibility map page is updated to make
81 : * sure that the visibility map update doesn't get written to disk before the
82 : * WAL record of the changes that made it possible to set the bit is flushed.
83 : * But when a bit is cleared, we don't have to do that because it's always
84 : * safe to clear a bit in the map from a correctness point of view.
85 : *
86 : *-------------------------------------------------------------------------
87 : */
88 : #include "postgres.h"
89 :
90 : #include "access/heapam_xlog.h"
91 : #include "access/visibilitymap.h"
92 : #include "access/xloginsert.h"
93 : #include "access/xlogutils.h"
94 : #include "miscadmin.h"
95 : #include "port/pg_bitutils.h"
96 : #include "storage/bufmgr.h"
97 : #include "storage/smgr.h"
98 : #include "utils/inval.h"
99 : #include "utils/rel.h"
100 :
101 :
102 : /*#define TRACE_VISIBILITYMAP */
103 :
104 : /*
105 : * Size of the bitmap on each visibility map page, in bytes. There are no
106 : * extra headers, so the whole page minus the standard page header is
107 : * used for the bitmap.
108 : */
109 : #define MAPSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData))
110 :
111 : /* Number of heap blocks we can represent in one byte */
112 : #define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)
113 :
114 : /* Number of heap blocks we can represent in one visibility map page. */
115 : #define HEAPBLOCKS_PER_PAGE (MAPSIZE * HEAPBLOCKS_PER_BYTE)
116 :
117 : /* Mapping from heap block number to the right bit in the visibility map */
118 : #define HEAPBLK_TO_MAPBLOCK(x) ((x) / HEAPBLOCKS_PER_PAGE)
119 : #define HEAPBLK_TO_MAPBYTE(x) (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)
120 : #define HEAPBLK_TO_OFFSET(x) (((x) % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK)
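
/*
 * As a worked example (assuming the default BLCKSZ of 8192, which makes
 * MAPSIZE 8168 bytes and each map page cover 8168 * 4 = 32672 heap blocks),
 * heap block 40000 is found at:
 *
 *     HEAPBLK_TO_MAPBLOCK(40000) = 40000 / 32672       = 1
 *     HEAPBLK_TO_MAPBYTE(40000)  = (40000 % 32672) / 4 = 1832
 *     HEAPBLK_TO_OFFSET(40000)   = (40000 % 4) * 2     = 0
 */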
121 :
122 : /* Masks for counting subsets of bits in the visibility map. */
123 : #define VISIBLE_MASK8 (0x55) /* The lower bit of each bit pair */
124 : #define FROZEN_MASK8 (0xaa) /* The upper bit of each bit pair */
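
/*
 * As an illustration, a map byte of 0x07 (binary 00 00 01 11) covers four
 * heap blocks: block 0 is all-visible and all-frozen, and block 1 is
 * all-visible only. So (0x07 & VISIBLE_MASK8) has two bits set and
 * (0x07 & FROZEN_MASK8) has one, which is how visibilitymap_count()
 * tallies the map with pg_popcount_masked().
 */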
125 :
126 : /* prototypes for internal routines */
127 : static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend);
128 : static Buffer vm_extend(Relation rel, BlockNumber vm_nblocks);
129 :
130 :
131 : /*
132 : * visibilitymap_clear - clear specified bits for one page in visibility map
133 : *
134 : * You must pass a buffer containing the correct map page to this function.
135 : * Call visibilitymap_pin first to pin the right one. This function doesn't do
136 : * any I/O. Returns true if any bits have been cleared and false otherwise.
137 : */
138 : bool
139 39870 : visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
140 : {
141 39870 : BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
142 39870 : int mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
143 39870 : int mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
144 39870 : uint8 mask = flags << mapOffset;
145 : char *map;
146 39870 : bool cleared = false;
147 :
148 : /* Must never clear all_visible bit while leaving all_frozen bit set */
149 : Assert(flags & VISIBILITYMAP_VALID_BITS);
150 : Assert(flags != VISIBILITYMAP_ALL_VISIBLE);
151 :
152 : #ifdef TRACE_VISIBILITYMAP
153 : elog(DEBUG1, "vm_clear %s %d", RelationGetRelationName(rel), heapBlk);
154 : #endif
155 :
156 39870 : if (!BufferIsValid(vmbuf) || BufferGetBlockNumber(vmbuf) != mapBlock)
157 0 : elog(ERROR, "wrong buffer passed to visibilitymap_clear");
158 :
159 39870 : LockBuffer(vmbuf, BUFFER_LOCK_EXCLUSIVE);
160 39870 : map = PageGetContents(BufferGetPage(vmbuf));
161 :
162 39870 : if (map[mapByte] & mask)
163 : {
164 35694 : map[mapByte] &= ~mask;
165 :
166 35694 : MarkBufferDirty(vmbuf);
167 35694 : cleared = true;
168 : }
169 :
170 39870 : LockBuffer(vmbuf, BUFFER_LOCK_UNLOCK);
171 :
172 39870 : return cleared;
173 : }
174 :
175 : /*
176 : * visibilitymap_pin - pin a map page for setting a bit
177 : *
178 : * Setting a bit in the visibility map is a two-phase operation. First, call
179 : * visibilitymap_pin, to pin the visibility map page containing the bit for
180 : * the heap page. Because that can require I/O to read the map page, you
181 : * shouldn't hold a lock on the heap page while doing that. Then, call
182 : * visibilitymap_set to actually set the bit.
183 : *
184 : * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by
185 : * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
186 : * relation. On return, *vmbuf is a valid buffer with the map page containing
187 : * the bit for heapBlk.
188 : *
189 : * If the page doesn't exist in the map file yet, it is extended.
190 : */
191 : void
192 767120 : visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
193 : {
194 767120 : BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
195 :
196 : /* Reuse the old pinned buffer if possible */
197 767120 : if (BufferIsValid(*vmbuf))
198 : {
199 659050 : if (BufferGetBlockNumber(*vmbuf) == mapBlock)
200 659050 : return;
201 :
202 0 : ReleaseBuffer(*vmbuf);
203 : }
204 108070 : *vmbuf = vm_readbuf(rel, mapBlock, true);
205 : }
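
/*
 * A minimal sketch of the two-phase protocol described above (hypothetical
 * caller; VACUUM's code in vacuumlazy.c does something along these lines):
 *
 *     Buffer      vmbuf = InvalidBuffer;
 *
 *     visibilitymap_pin(rel, heapBlk, &vmbuf);   <-- may do I/O, heap unlocked
 *     LockBuffer(heapBuf, BUFFER_LOCK_EXCLUSIVE);
 *     ... recheck visibility, set PD_ALL_VISIBLE on the heap page ...
 *     visibilitymap_set(rel, heapBlk, heapBuf, InvalidXLogRecPtr,
 *                       vmbuf, cutoff_xid, VISIBILITYMAP_ALL_VISIBLE);
 *     LockBuffer(heapBuf, BUFFER_LOCK_UNLOCK);
 *     ReleaseBuffer(vmbuf);
 */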
206 :
207 : /*
208 : * visibilitymap_pin_ok - do we already have the correct page pinned?
209 : *
210 : * On entry, vmbuf should be InvalidBuffer or a valid buffer returned by
211 : * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
212 : * relation. The return value indicates whether the buffer covers the
213 : * given heapBlk.
214 : */
215 : bool
216 31608 : visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf)
217 : {
218 31608 : BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
219 :
220 31608 : return BufferIsValid(vmbuf) && BufferGetBlockNumber(vmbuf) == mapBlock;
221 : }
222 :
223 : /*
224 : * visibilitymap_set - set bit(s) on a previously pinned page
225 : *
226 : * recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
227 : * or InvalidXLogRecPtr in normal running. The VM page LSN is advanced to the
228 : * one provided; in normal running, we generate a new XLOG record and set the
229 : * page LSN to that value (though the heap page's LSN may *not* be updated;
230 : * see below). cutoff_xid is the largest xmin on the page being marked
231 : * all-visible; it is needed for Hot Standby, and can be InvalidTransactionId
232 : * if the page contains no tuples. It can also be set to InvalidTransactionId
233 : * when a page that is already all-visible is being marked all-frozen.
234 : *
235 : * Caller is expected to set the heap page's PD_ALL_VISIBLE bit before calling
236 : * this function. Except in recovery, caller should also pass the heap
237 : * buffer. When checksums are enabled and we're not in recovery, we must add
238 : * the heap buffer to the WAL chain to protect it from being torn.
239 : *
240 : * You must pass a buffer containing the correct map page to this function.
241 : * Call visibilitymap_pin first to pin the right one. This function doesn't do
242 : * any I/O.
243 : */
244 : void
245 85450 : visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
246 : XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid,
247 : uint8 flags)
248 : {
249 85450 : BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
250 85450 : uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
251 85450 : uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
252 : Page page;
253 : uint8 *map;
254 : uint8 status;
255 :
256 : #ifdef TRACE_VISIBILITYMAP
257 : elog(DEBUG1, "vm_set flags 0x%02X for %s %d",
258 : flags, RelationGetRelationName(rel), heapBlk);
259 : #endif
260 :
261 : Assert(InRecovery || !XLogRecPtrIsValid(recptr));
262 : Assert(InRecovery || PageIsAllVisible(BufferGetPage(heapBuf)));
263 : Assert((flags & VISIBILITYMAP_VALID_BITS) == flags);
264 :
265 : /* Must never set all_frozen bit without also setting all_visible bit */
266 : Assert(flags != VISIBILITYMAP_ALL_FROZEN);
267 :
268 : /* Check that we have the right heap page pinned, if present */
269 85450 : if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk)
270 0 : elog(ERROR, "wrong heap buffer passed to visibilitymap_set");
271 :
272 : Assert(!BufferIsValid(heapBuf) ||
273 : BufferIsLockedByMeInMode(heapBuf, BUFFER_LOCK_EXCLUSIVE));
274 :
275 : /* Check that we have the right VM page pinned */
276 85450 : if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
277 0 : elog(ERROR, "wrong VM buffer passed to visibilitymap_set");
278 :
279 85450 : page = BufferGetPage(vmBuf);
280 85450 : map = (uint8 *) PageGetContents(page);
281 85450 : LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);
282 :
283 85450 : status = (map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS;
284 85450 : if (flags != status)
285 : {
286 85450 : START_CRIT_SECTION();
287 :
288 85450 : map[mapByte] |= (flags << mapOffset);
289 85450 : MarkBufferDirty(vmBuf);
290 :
291 85450 : if (RelationNeedsWAL(rel))
292 : {
293 82326 : if (!XLogRecPtrIsValid(recptr))
294 : {
295 : Assert(!InRecovery);
296 72952 : recptr = log_heap_visible(rel, heapBuf, vmBuf, cutoff_xid, flags);
297 :
298 : /*
299 : * If data checksums are enabled (or wal_log_hints=on), we
300 : * need to protect the heap page from being torn.
301 : *
302 : * If not, then we must *not* update the heap page's LSN. In
303 : * this case, the FPI for the heap page was omitted from the
304 : * WAL record inserted above, so it would be incorrect to
305 : * update the heap page's LSN.
306 : */
307 72952 : if (XLogHintBitIsNeeded())
308 : {
309 66782 : Page heapPage = BufferGetPage(heapBuf);
310 :
311 66782 : PageSetLSN(heapPage, recptr);
312 : }
313 : }
314 82326 : PageSetLSN(page, recptr);
315 : }
316 :
317 85450 : END_CRIT_SECTION();
318 : }
319 :
320 85450 : LockBuffer(vmBuf, BUFFER_LOCK_UNLOCK);
321 85450 : }
322 :
323 : /*
324 : * Set VM (visibility map) flags in the VM page contained in vmBuf.
325 : *
326 : * This function is intended for callers that log VM changes together
327 : * with the heap page modifications that rendered the page all-visible.
328 : * Callers that log VM changes separately should use visibilitymap_set().
329 : *
330 : * vmBuf must be pinned and exclusively locked, and it must cover the VM bits
331 : * corresponding to heapBlk.
332 : *
333 : * In normal operation (not recovery), this must be called inside a critical
334 : * section that also applies the necessary heap page changes and, if
335 : * applicable, emits WAL.
336 : *
337 : * The caller is responsible for ensuring consistency between the heap page
338 : * and the VM page by holding a pin and exclusive lock on the buffer
339 : * containing heapBlk.
340 : *
341 : * rlocator is used only for debugging messages.
342 : */
343 : void
344 37490 : visibilitymap_set_vmbits(BlockNumber heapBlk,
345 : Buffer vmBuf, uint8 flags,
346 : const RelFileLocator rlocator)
347 : {
348 37490 : BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
349 37490 : uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
350 37490 : uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
351 : Page page;
352 : uint8 *map;
353 : uint8 status;
354 :
355 : #ifdef TRACE_VISIBILITYMAP
356 : elog(DEBUG1, "vm_set flags 0x%02X for %s %d",
357 : flags,
358 : relpathbackend(rlocator, MyProcNumber, MAIN_FORKNUM).str,
359 : heapBlk);
360 : #endif
361 :
362 : /* Call in same critical section where WAL is emitted. */
363 : Assert(InRecovery || CritSectionCount > 0);
364 :
365 : /* Flags should be valid. Also never clear bits with this function */
366 : Assert((flags & VISIBILITYMAP_VALID_BITS) == flags);
367 :
368 : /* Must never set all_frozen bit without also setting all_visible bit */
369 : Assert(flags != VISIBILITYMAP_ALL_FROZEN);
370 :
371 : /* Check that we have the right VM page pinned */
372 37490 : if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
373 0 : elog(ERROR, "wrong VM buffer passed to visibilitymap_set_vmbits");
374 :
375 : Assert(BufferIsLockedByMeInMode(vmBuf, BUFFER_LOCK_EXCLUSIVE));
376 :
377 37490 : page = BufferGetPage(vmBuf);
378 37490 : map = (uint8 *) PageGetContents(page);
379 :
380 37490 : status = (map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS;
381 37490 : if (flags != status)
382 : {
383 37490 : map[mapByte] |= (flags << mapOffset);
384 37490 : MarkBufferDirty(vmBuf);
385 : }
386 37490 : }
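
/*
 * A sketch of the intended call pattern for visibilitymap_set_vmbits()
 * (hypothetical; the real callers are the heap paths that emit one WAL
 * record covering both the heap change and the VM change):
 *
 *     START_CRIT_SECTION();
 *     ... apply the heap page changes, set PD_ALL_VISIBLE ...
 *     visibilitymap_set_vmbits(heapBlk, vmbuf,
 *                              VISIBILITYMAP_ALL_VISIBLE, rlocator);
 *     ... XLogInsert() a record covering both buffers, PageSetLSN() both ...
 *     END_CRIT_SECTION();
 */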
387 :
388 : /*
389 : * visibilitymap_get_status - get status of bits
390 : *
391 : * Are all tuples on heapBlk visible to all transactions, or all frozen,
392 : * according to the visibility map?
393 : *
394 : * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by an
395 : * earlier call to visibilitymap_pin or visibilitymap_get_status on the same
396 : * relation. On return, *vmbuf is a valid buffer with the map page containing
397 : * the bit for heapBlk, or InvalidBuffer. The caller is responsible for
398 : * releasing *vmbuf after it's done testing and setting bits.
399 : *
400 : * NOTE: This function is typically called without a lock on the heap page,
401 : * so somebody else could change the bit just after we look at it. In fact,
402 : * since we don't lock the visibility map page either, it's even possible that
403 : * someone else could have changed the bit just before we look at it, but yet
404 : * we might see the old value. It is the caller's responsibility to deal with
405 : * all concurrency issues!
406 : */
407 : uint8
408 7539598 : visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
409 : {
410 7539598 : BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
411 7539598 : uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
412 7539598 : uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
413 : char *map;
414 : uint8 result;
415 :
416 : #ifdef TRACE_VISIBILITYMAP
417 : elog(DEBUG1, "vm_get_status %s %d", RelationGetRelationName(rel), heapBlk);
418 : #endif
419 :
420 : /* Reuse the old pinned buffer if possible */
421 7539598 : if (BufferIsValid(*vmbuf))
422 : {
423 5812436 : if (BufferGetBlockNumber(*vmbuf) != mapBlock)
424 : {
425 0 : ReleaseBuffer(*vmbuf);
426 0 : *vmbuf = InvalidBuffer;
427 : }
428 : }
429 :
430 7539598 : if (!BufferIsValid(*vmbuf))
431 : {
432 1727162 : *vmbuf = vm_readbuf(rel, mapBlock, false);
433 1727162 : if (!BufferIsValid(*vmbuf))
434 1534092 : return (uint8) 0;
435 : }
436 :
437 6005506 : map = PageGetContents(BufferGetPage(*vmbuf));
438 :
439 : /*
440 : * A single byte read is atomic. There could be memory-ordering effects
441 : * here, but for performance reasons we make it the caller's job to worry
442 : * about that.
443 : */
444 6005506 : result = ((map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS);
445 6005506 : return result;
446 : }
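
/*
 * A sketch of a typical status check (hypothetical caller): the answer is
 * only a hint, and the returned buffer must eventually be released.
 *
 *     Buffer      vmbuf = InvalidBuffer;
 *
 *     if (visibilitymap_get_status(rel, blkno, &vmbuf) &
 *         VISIBILITYMAP_ALL_VISIBLE)
 *         ... the heap fetch can be skipped, as in an index-only scan ...
 *     if (BufferIsValid(vmbuf))
 *         ReleaseBuffer(vmbuf);
 */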
447 :
448 : /*
449 : * visibilitymap_count - count number of bits set in visibility map
450 : *
451 : * Note: we ignore the possibility of race conditions when the table is being
452 : * extended concurrently with the call. New pages added to the table aren't
453 : * going to be marked all-visible or all-frozen, so they won't affect the result.
454 : */
455 : void
456 198718 : visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)
457 : {
458 : BlockNumber mapBlock;
459 198718 : BlockNumber nvisible = 0;
460 198718 : BlockNumber nfrozen = 0;
461 :
462 : /* all_visible must be specified */
463 : Assert(all_visible);
464 :
465 198718 : for (mapBlock = 0;; mapBlock++)
466 73584 : {
467 : Buffer mapBuffer;
468 : uint64 *map;
469 :
470 : /*
471 : * Read till we fall off the end of the map. We assume that any extra
472 : * bytes in the last page are zeroed, so we don't bother excluding
473 : * them from the count.
474 : */
475 272302 : mapBuffer = vm_readbuf(rel, mapBlock, false);
476 272302 : if (!BufferIsValid(mapBuffer))
477 198718 : break;
478 :
479 : /*
480 : * We choose not to lock the page, since the result is going to be
481 : * immediately stale anyway if anyone is concurrently setting or
482 : * clearing bits, and we only really need an approximate value.
483 : */
484 73584 : map = (uint64 *) PageGetContents(BufferGetPage(mapBuffer));
485 :
486 73584 : nvisible += pg_popcount_masked((const char *) map, MAPSIZE, VISIBLE_MASK8);
487 73584 : if (all_frozen)
488 73584 : nfrozen += pg_popcount_masked((const char *) map, MAPSIZE, FROZEN_MASK8);
489 :
490 73584 : ReleaseBuffer(mapBuffer);
491 : }
492 :
493 198718 : *all_visible = nvisible;
494 198718 : if (all_frozen)
495 198718 : *all_frozen = nfrozen;
496 198718 : }
497 :
498 : /*
499 : * visibilitymap_prepare_truncate -
500 : * prepare for truncation of the visibility map
501 : *
502 : * nheapblocks is the new size of the heap.
503 : *
504 : * Return the number of blocks of new visibility map.
505 : * If it's InvalidBlockNumber, there is nothing to truncate;
506 : * otherwise the caller is responsible for calling smgrtruncate()
507 : * to truncate the visibility map pages.
508 : */
509 : BlockNumber
510 376 : visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks)
511 : {
512 : BlockNumber newnblocks;
513 :
514 : /* last remaining block, byte, and bit */
515 376 : BlockNumber truncBlock = HEAPBLK_TO_MAPBLOCK(nheapblocks);
516 376 : uint32 truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks);
517 376 : uint8 truncOffset = HEAPBLK_TO_OFFSET(nheapblocks);
518 :
519 : #ifdef TRACE_VISIBILITYMAP
520 : elog(DEBUG1, "vm_truncate %s %d", RelationGetRelationName(rel), nheapblocks);
521 : #endif
522 :
523 : /*
524 : * If no visibility map has been created yet for this relation, there's
525 : * nothing to truncate.
526 : */
527 376 : if (!smgrexists(RelationGetSmgr(rel), VISIBILITYMAP_FORKNUM))
528 0 : return InvalidBlockNumber;
529 :
530 : /*
531 : * Unless the new size is exactly at a visibility map page boundary, the
532 : * tail bits in the last remaining map page, representing truncated heap
533 : * blocks, need to be cleared. This is not only tidy, but also necessary
534 : * because we don't get a chance to clear the bits if the heap is extended
535 : * again.
536 : */
537 376 : if (truncByte != 0 || truncOffset != 0)
538 234 : {
539 : Buffer mapBuffer;
540 : Page page;
541 : char *map;
542 :
543 234 : newnblocks = truncBlock + 1;
544 :
545 234 : mapBuffer = vm_readbuf(rel, truncBlock, false);
546 234 : if (!BufferIsValid(mapBuffer))
547 : {
548 : /* nothing to do, the file was already smaller */
549 0 : return InvalidBlockNumber;
550 : }
551 :
552 234 : page = BufferGetPage(mapBuffer);
553 234 : map = PageGetContents(page);
554 :
555 234 : LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE);
556 :
557 : /* NO EREPORT(ERROR) from here till changes are logged */
558 234 : START_CRIT_SECTION();
559 :
560 : /* Clear out the unwanted bytes. */
561 234 : MemSet(&map[truncByte + 1], 0, MAPSIZE - (truncByte + 1));
562 :
563 : /*----
564 : * Mask out the unwanted bits of the last remaining byte.
565 : *
566 : * ((1 << 0) - 1) = 00000000
567 : * ((1 << 1) - 1) = 00000001
568 : * ...
569 : * ((1 << 6) - 1) = 00111111
570 : * ((1 << 7) - 1) = 01111111
571 : *----
572 : */
573 234 : map[truncByte] &= (1 << truncOffset) - 1;
574 :
575 : /*
576 : * Truncation of a relation is WAL-logged at a higher level, and we
577 : * will be called during WAL replay. But if checksums are enabled, we
578 : * still need to write a WAL record to protect against a torn page, in
579 : * case the page is flushed to disk before the truncation WAL record. We cannot
580 : * use MarkBufferDirtyHint here, because that will not dirty the page
581 : * during recovery.
582 : */
583 234 : MarkBufferDirty(mapBuffer);
584 234 : if (!InRecovery && RelationNeedsWAL(rel) && XLogHintBitIsNeeded())
585 194 : log_newpage_buffer(mapBuffer, false);
586 :
587 234 : END_CRIT_SECTION();
588 :
589 234 : UnlockReleaseBuffer(mapBuffer);
590 : }
591 : else
592 142 : newnblocks = truncBlock;
593 :
594 376 : if (smgrnblocks(RelationGetSmgr(rel), VISIBILITYMAP_FORKNUM) <= newnblocks)
595 : {
596 : /* nothing to do, the file was already smaller than requested size */
597 234 : return InvalidBlockNumber;
598 : }
599 :
600 142 : return newnblocks;
601 : }
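
/*
 * A sketch of how the result is meant to be used (hypothetical; the real
 * caller is the relation-truncation code in catalog/storage.c):
 *
 *     BlockNumber vm_nblocks = visibilitymap_prepare_truncate(rel, nblocks);
 *
 *     if (BlockNumberIsValid(vm_nblocks))
 *         ... smgrtruncate() the VISIBILITYMAP_FORKNUM fork to vm_nblocks ...
 */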
602 :
603 : /*
604 : * Read a visibility map page.
605 : *
606 : * If the page doesn't exist, InvalidBuffer is returned, unless 'extend' is
607 : * true, in which case the visibility map file is extended.
608 : */
609 : static Buffer
610 2107768 : vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
611 : {
612 : Buffer buf;
613 : SMgrRelation reln;
614 :
615 : /*
616 : * Caution: re-using this smgr pointer could fail if the relcache entry
617 : * gets closed. It's safe as long as we only do smgr-level operations
618 : * between here and the last use of the pointer.
619 : */
620 2107768 : reln = RelationGetSmgr(rel);
621 :
622 : /*
623 : * If we haven't cached the size of the visibility map fork yet, check it
624 : * first.
625 : */
626 2107768 : if (reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == InvalidBlockNumber)
627 : {
628 223424 : if (smgrexists(reln, VISIBILITYMAP_FORKNUM))
629 94544 : smgrnblocks(reln, VISIBILITYMAP_FORKNUM);
630 : else
631 128880 : reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = 0;
632 : }
633 :
634 : /*
635 : * For reading we use ZERO_ON_ERROR mode, and initialize the page if
636 : * necessary. It's always safe to clear bits, so it's better to clear
637 : * corrupt pages than error out.
638 : *
639 : * We use the same path below to initialize pages when extending the
640 : * relation, as a concurrent extension can end up with vm_extend()
641 : * returning an already-initialized page.
642 : */
643 2107768 : if (blkno >= reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM])
644 : {
645 1738946 : if (extend)
646 6136 : buf = vm_extend(rel, blkno + 1);
647 : else
648 1732810 : return InvalidBuffer;
649 : }
650 : else
651 368822 : buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
652 : RBM_ZERO_ON_ERROR, NULL);
653 :
654 : /*
655 : * Initializing the page when needed is trickier than it looks, because of
656 : * the possibility of multiple backends doing this concurrently, and our
657 : * desire to not uselessly take the buffer lock in the normal path where
658 : * the page is OK. We must take the lock to initialize the page, so
659 : * recheck page newness after we have the lock, in case someone else
660 : * already did it. Also, because we initially check PageIsNew with no
661 : * lock, it's possible to fall through and return the buffer while someone
662 : * else is still initializing the page (i.e., we might see pd_upper as set
663 : * but other page header fields are still zeroes). This is harmless for
664 : * callers that will take a buffer lock themselves, but some callers
665 : * inspect the page without any lock at all. The latter is OK only so
666 : * long as it doesn't depend on the page header having correct contents.
667 : * Current usage is safe because PageGetContents() does not require that.
668 : */
669 374958 : if (PageIsNew(BufferGetPage(buf)))
670 : {
671 6234 : LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
672 6234 : if (PageIsNew(BufferGetPage(buf)))
673 6234 : PageInit(BufferGetPage(buf), BLCKSZ, 0);
674 6234 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
675 : }
676 374958 : return buf;
677 : }
678 :
679 : /*
680 : * Ensure that the visibility map fork is at least vm_nblocks long, extending
681 : * it if necessary with zeroed pages.
682 : */
683 : static Buffer
684 6136 : vm_extend(Relation rel, BlockNumber vm_nblocks)
685 : {
686 : Buffer buf;
687 :
688 6136 : buf = ExtendBufferedRelTo(BMR_REL(rel), VISIBILITYMAP_FORKNUM, NULL,
689 : EB_CREATE_FORK_IF_NEEDED |
690 : EB_CLEAR_SIZE_CACHE,
691 : vm_nblocks,
692 : RBM_ZERO_ON_ERROR);
693 :
694 : /*
695 : * Send a shared-inval message to force other backends to close any smgr
696 : * references they may have for this rel, which we are about to change.
697 : * This is a useful optimization because it means that backends don't have
698 : * to keep checking for creation or extension of the file, which happens
699 : * infrequently.
700 : */
701 6136 : CacheInvalidateSmgr(RelationGetSmgr(rel)->smgr_rlocator);
702 :
703 6136 : return buf;
704 : }
|