/*-------------------------------------------------------------------------
 *
 * visibilitymap.c
 *	  bitmap for tracking visibility of heap tuples
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/access/heap/visibilitymap.c
 *
 * INTERFACE ROUTINES
 *		visibilitymap_clear  - clear bits for one page in the visibility map
 *		visibilitymap_pin	 - pin a map page for setting a bit
 *		visibilitymap_pin_ok - check whether correct map page is already pinned
 *		visibilitymap_set	 - set a bit in a previously pinned page
 *		visibilitymap_get_status - get status of bits
 *		visibilitymap_count  - count number of bits set in visibility map
 *		visibilitymap_prepare_truncate -
 *			prepare for truncation of the visibility map
 *
 * NOTES
 *
 * The visibility map is a bitmap with two bits (all-visible and all-frozen)
 * per heap page. A set all-visible bit means that all tuples on the page are
 * known visible to all transactions, and therefore the page doesn't need to
 * be vacuumed. A set all-frozen bit means that all tuples on the page are
 * completely frozen, and therefore the page doesn't need to be vacuumed even
 * when a whole-table-scanning vacuum (e.g. an anti-wraparound vacuum) is
 * required. The all-frozen bit must be set only when the page is already
 * all-visible.
 *
 * The map is conservative in the sense that we make sure that whenever a bit
 * is set, we know the condition is true, but if a bit is not set, it might or
 * might not be true.
 *
 * Clearing visibility map bits is not separately WAL-logged. The callers
 * must make sure that whenever a bit is cleared, the bit is cleared on WAL
 * replay of the updating operation as well.
 *
 * When we *set* a visibility map bit during VACUUM, we must write WAL. This
 * may seem counterintuitive, since the bit is basically a hint: if it is
 * clear, it may still be the case that every tuple on the page is visible to
 * all transactions; we just don't know that for certain. The difficulty is
 * that there are two bits which are typically set together: the
 * PD_ALL_VISIBLE bit on the page itself, and the visibility map bit. If a
 * crash occurs after the visibility map page makes it to disk and before the
 * updated heap page makes it to disk, redo must set the bit on the heap page.
 * Otherwise, the next insert, update, or delete on the heap page will fail to
 * realize that the visibility map bit must be cleared, possibly causing
 * index-only scans to return wrong answers.
 *
 * VACUUM will normally skip pages for which the visibility map bit is set;
 * such pages can't contain any dead tuples and therefore don't need vacuuming.
 *
 * LOCKING
 *
 * In heapam.c, whenever a page is modified so that not all tuples on the
 * page are visible to everyone anymore, the corresponding bit in the
 * visibility map is cleared. In order to be crash-safe, we need to do this
 * while still holding a lock on the heap page and in the same critical
 * section that logs the page modification. However, we don't want to hold
 * the buffer lock over any I/O that may be required to read in the visibility
 * map page. To avoid this, we examine the heap page before locking it;
 * if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map
 * page. Then, we lock the buffer. But this creates a race condition: there
 * is a possibility that in the time it takes to lock the buffer, the
 * PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the
 * buffer, pin the visibility map page, and relock the buffer. This shouldn't
 * happen often, because only VACUUM currently sets visibility map bits,
 * and the race will only occur if VACUUM processes a given page at almost
 * exactly the same time that someone tries to further modify it.
 *
 * To set a bit, you need to hold a lock on the heap page. That prevents
 * the race condition where VACUUM sees that all tuples on the page are
 * visible to everyone, but another backend modifies the page before VACUUM
 * sets the bit in the visibility map.
 *
 * When a bit is set, the LSN of the visibility map page is updated to make
 * sure that the visibility map update doesn't get written to disk before the
 * WAL record of the changes that made it possible to set the bit is flushed.
 * But when a bit is cleared, we don't have to do that because it's always
 * safe to clear a bit in the map from a correctness point of view.
 *
 *-------------------------------------------------------------------------
 */
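
/*
 * As a rough illustration of the locking protocol described above, a
 * modifying caller in heapam.c follows approximately this pattern (a
 * simplified sketch, not verbatim code from heapam.c; WAL logging and
 * error handling are omitted):
 *
 *		vmbuffer = InvalidBuffer;
 *		if (PageIsAllVisible(BufferGetPage(buffer)))
 *			visibilitymap_pin(rel, block, &vmbuffer);	(may do I/O)
 *		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 *		if (PageIsAllVisible(BufferGetPage(buffer)) &&
 *			!visibilitymap_pin_ok(block, vmbuffer))
 *		{
 *			(lost the race: the bit was set while we awaited the lock)
 *			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 *			visibilitymap_pin(rel, block, &vmbuffer);
 *			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 *		}
 *		(... modify the page and clear the map bit in the same critical
 *		section that WAL-logs the modification ...)
 */
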
#include "postgres.h"

#include "access/heapam_xlog.h"
#include "access/visibilitymap.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
#include "storage/bufmgr.h"
#include "storage/smgr.h"
#include "utils/inval.h"
#include "utils/rel.h"


/*#define TRACE_VISIBILITYMAP */

/*
 * Size of the bitmap on each visibility map page, in bytes. There are no
 * extra headers, so the whole page minus the standard page header is
 * used for the bitmap.
 */
#define MAPSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData))

/* Number of heap blocks we can represent in one byte */
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)

/* Number of heap blocks we can represent in one visibility map page. */
#define HEAPBLOCKS_PER_PAGE (MAPSIZE * HEAPBLOCKS_PER_BYTE)

/* Mapping from heap block number to the right bit in the visibility map */
#define HEAPBLK_TO_MAPBLOCK(x) ((x) / HEAPBLOCKS_PER_PAGE)
#define HEAPBLK_TO_MAPBYTE(x) (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)
#define HEAPBLK_TO_OFFSET(x) (((x) % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK)

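/*
 * Worked example of the mapping, assuming the default BLCKSZ of 8192 and
 * BITS_PER_HEAPBLOCK = 2: MAPSIZE is 8192 - 24 = 8168 bytes, so each byte
 * covers HEAPBLOCKS_PER_BYTE = 4 heap blocks and each map page covers
 * HEAPBLOCKS_PER_PAGE = 32672 heap blocks. Heap block 100000 then lands in
 * map block 100000 / 32672 = 3, in byte (100000 % 32672) / 4 = 496 of that
 * page, at bit offset (100000 % 4) * 2 = 0 within that byte.
 */
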
/* Masks for counting subsets of bits in the visibility map. */
#define VISIBLE_MASK8	(0x55)	/* The lower bit of each bit pair */
#define FROZEN_MASK8	(0xaa)	/* The upper bit of each bit pair */

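/*
 * For illustration: a map byte of 0x07 (binary 00000111) says the first
 * heap block covered by the byte is all-visible and all-frozen (pair 11)
 * and the second is all-visible only (pair 01). Masking with VISIBLE_MASK8
 * gives 0x05, i.e. two all-visible blocks, while masking with FROZEN_MASK8
 * gives 0x02, i.e. one all-frozen block; visibilitymap_count() below
 * applies this idea a page at a time via pg_popcount_masked().
 */
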
/* prototypes for internal routines */
static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend);
static Buffer vm_extend(Relation rel, BlockNumber vm_nblocks);


/*
 * visibilitymap_clear - clear specified bits for one page in visibility map
 *
 * You must pass a buffer containing the correct map page to this function.
 * Call visibilitymap_pin first to pin the right one. This function doesn't do
 * any I/O. Returns true if any bits have been cleared and false otherwise.
 */
bool
visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
{
	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
	int			mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
	int			mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
	uint8		mask = flags << mapOffset;
	char	   *map;
	bool		cleared = false;

	/* Must never clear all_visible bit while leaving all_frozen bit set */
	Assert(flags & VISIBILITYMAP_VALID_BITS);
	Assert(flags != VISIBILITYMAP_ALL_VISIBLE);

#ifdef TRACE_VISIBILITYMAP
	elog(DEBUG1, "vm_clear %s %d", RelationGetRelationName(rel), heapBlk);
#endif

	if (!BufferIsValid(vmbuf) || BufferGetBlockNumber(vmbuf) != mapBlock)
		elog(ERROR, "wrong buffer passed to visibilitymap_clear");

	LockBuffer(vmbuf, BUFFER_LOCK_EXCLUSIVE);
	map = PageGetContents(BufferGetPage(vmbuf));

	if (map[mapByte] & mask)
	{
		map[mapByte] &= ~mask;

		MarkBufferDirty(vmbuf);
		cleared = true;
	}

	LockBuffer(vmbuf, BUFFER_LOCK_UNLOCK);

	return cleared;
}

/*
 * visibilitymap_pin - pin a map page for setting a bit
 *
 * Setting a bit in the visibility map is a two-phase operation. First, call
 * visibilitymap_pin, to pin the visibility map page containing the bit for
 * the heap page. Because that can require I/O to read the map page, you
 * shouldn't hold a lock on the heap page while doing that. Then, call
 * visibilitymap_set to actually set the bit.
 *
 * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by
 * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. On return, *vmbuf is a valid buffer with the map page containing
 * the bit for heapBlk.
 *
 * If the page doesn't exist in the map file yet, it is extended.
 */
void
visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
{
	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);

	/* Reuse the old pinned buffer if possible */
	if (BufferIsValid(*vmbuf))
	{
		if (BufferGetBlockNumber(*vmbuf) == mapBlock)
			return;

		ReleaseBuffer(*vmbuf);
	}
	*vmbuf = vm_readbuf(rel, mapBlock, true);
}

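/*
 * A minimal caller-side sketch of the two-phase protocol (illustrative
 * only; real callers such as VACUUM also handle visibility checks, WAL,
 * and error paths):
 *
 *		Buffer		vmbuf = InvalidBuffer;
 *
 *		visibilitymap_pin(rel, heapBlk, &vmbuf);	(I/O may happen here)
 *		LockBuffer(heapBuf, BUFFER_LOCK_EXCLUSIVE);
 *		(... confirm all tuples are visible, set PD_ALL_VISIBLE ...)
 *		visibilitymap_set(rel, heapBlk, heapBuf, InvalidXLogRecPtr,
 *						  vmbuf, cutoff_xid, VISIBILITYMAP_ALL_VISIBLE);
 *		LockBuffer(heapBuf, BUFFER_LOCK_UNLOCK);
 *		ReleaseBuffer(vmbuf);
 */
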
/*
 * visibilitymap_pin_ok - do we already have the correct page pinned?
 *
 * On entry, vmbuf should be InvalidBuffer or a valid buffer returned by
 * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. The return value indicates whether the buffer covers the
 * given heapBlk.
 */
bool
visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf)
{
	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);

	return BufferIsValid(vmbuf) && BufferGetBlockNumber(vmbuf) == mapBlock;
}

/*
 * visibilitymap_set - set bit(s) on a previously pinned page
 *
 * recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
 * or InvalidXLogRecPtr in normal running. The VM page LSN is advanced to the
 * one provided; in normal running, we generate a new XLOG record and set the
 * page LSN to that value (though the heap page's LSN may *not* be updated;
 * see below). cutoff_xid is the largest xmin on the page being marked
 * all-visible; it is needed for Hot Standby, and can be InvalidTransactionId
 * if the page contains no tuples. It can also be set to InvalidTransactionId
 * when a page that is already all-visible is being marked all-frozen.
 *
 * Caller is expected to set the heap page's PD_ALL_VISIBLE bit before calling
 * this function. Except in recovery, caller should also pass the heap
 * buffer. When checksums are enabled and we're not in recovery, we must add
 * the heap buffer to the WAL chain to protect it from being torn.
 *
 * You must pass a buffer containing the correct map page to this function.
 * Call visibilitymap_pin first to pin the right one. This function doesn't do
 * any I/O.
 *
 * Returns the state of the page's VM bits before setting flags.
 */
uint8
visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
				  XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid,
				  uint8 flags)
{
	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
	uint32		mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
	uint8		mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
	Page		page;
	uint8	   *map;
	uint8		status;

#ifdef TRACE_VISIBILITYMAP
	elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
#endif

	Assert(InRecovery || XLogRecPtrIsInvalid(recptr));
	Assert(InRecovery || PageIsAllVisible((Page) BufferGetPage(heapBuf)));
	Assert((flags & VISIBILITYMAP_VALID_BITS) == flags);

	/* Must never set all_frozen bit without also setting all_visible bit */
	Assert(flags != VISIBILITYMAP_ALL_FROZEN);

	/* Check that we have the right heap page pinned, if present */
	if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk)
		elog(ERROR, "wrong heap buffer passed to visibilitymap_set");

	/* Check that we have the right VM page pinned */
	if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
		elog(ERROR, "wrong VM buffer passed to visibilitymap_set");

	page = BufferGetPage(vmBuf);
	map = (uint8 *) PageGetContents(page);
	LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);

	status = (map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS;
	if (flags != status)
	{
		START_CRIT_SECTION();

		map[mapByte] |= (flags << mapOffset);
		MarkBufferDirty(vmBuf);

		if (RelationNeedsWAL(rel))
		{
			if (XLogRecPtrIsInvalid(recptr))
			{
				Assert(!InRecovery);
				recptr = log_heap_visible(rel, heapBuf, vmBuf, cutoff_xid, flags);

				/*
				 * If data checksums are enabled (or wal_log_hints=on), we
				 * need to protect the heap page from being torn.
				 *
				 * If not, then we must *not* update the heap page's LSN. In
				 * this case, the FPI for the heap page was omitted from the
				 * WAL record inserted above, so it would be incorrect to
				 * update the heap page's LSN.
				 */
				if (XLogHintBitIsNeeded())
				{
					Page		heapPage = BufferGetPage(heapBuf);

					PageSetLSN(heapPage, recptr);
				}
			}
			PageSetLSN(page, recptr);
		}

		END_CRIT_SECTION();
	}

	LockBuffer(vmBuf, BUFFER_LOCK_UNLOCK);
	return status;
}

/*
 * visibilitymap_get_status - get status of bits
 *
 * Are all tuples on heapBlk visible to all transactions, or all frozen,
 * according to the visibility map?
 *
 * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by an
 * earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. On return, *vmbuf is a valid buffer with the map page containing
 * the bit for heapBlk, or InvalidBuffer. The caller is responsible for
 * releasing *vmbuf after it's done testing and setting bits.
 *
 * NOTE: This function is typically called without a lock on the heap page,
 * so somebody else could change the bit just after we look at it. In fact,
 * since we don't lock the visibility map page either, it's even possible that
 * someone else could have changed the bit just before we look at it, yet
 * we might still see the old value. It is the caller's responsibility to deal
 * with all concurrency issues!
 */
uint8
visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
{
	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
	uint32		mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
	uint8		mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
	char	   *map;
	uint8		result;

#ifdef TRACE_VISIBILITYMAP
	elog(DEBUG1, "vm_get_status %s %d", RelationGetRelationName(rel), heapBlk);
#endif

	/* Reuse the old pinned buffer if possible */
	if (BufferIsValid(*vmbuf))
	{
		if (BufferGetBlockNumber(*vmbuf) != mapBlock)
		{
			ReleaseBuffer(*vmbuf);
			*vmbuf = InvalidBuffer;
		}
	}

	if (!BufferIsValid(*vmbuf))
	{
		*vmbuf = vm_readbuf(rel, mapBlock, false);
		if (!BufferIsValid(*vmbuf))
			return false;
	}

	map = PageGetContents(BufferGetPage(*vmbuf));

	/*
	 * A single byte read is atomic. There could be memory-ordering effects
	 * here, but for performance reasons we make it the caller's job to worry
	 * about that.
	 */
	result = ((map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS);
	return result;
}

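/*
 * A hypothetical caller sketch (illustration only): this is roughly how an
 * index-only scan consults the map, accepting the stale-answer caveats
 * described above.
 *
 *		Buffer		vmbuf = InvalidBuffer;
 *
 *		if (visibilitymap_get_status(rel, heapBlk, &vmbuf) &
 *			VISIBILITYMAP_ALL_VISIBLE)
 *			(... all tuples on heapBlk are visible; skip the heap fetch ...)
 *		if (BufferIsValid(vmbuf))
 *			ReleaseBuffer(vmbuf);
 */
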
/*
 * visibilitymap_count - count number of bits set in visibility map
 *
 * Note: we ignore the possibility of race conditions when the table is being
 * extended concurrently with the call. New pages added to the table aren't
 * going to be marked all-visible or all-frozen, so they won't affect the result.
 */
void
visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)
{
	BlockNumber mapBlock;
	BlockNumber nvisible = 0;
	BlockNumber nfrozen = 0;

	/* all_visible must be specified */
	Assert(all_visible);

	for (mapBlock = 0;; mapBlock++)
	{
		Buffer		mapBuffer;
		uint64	   *map;

		/*
		 * Read till we fall off the end of the map. We assume that any extra
		 * bytes in the last page are zeroed, so we don't bother excluding
		 * them from the count.
		 */
		mapBuffer = vm_readbuf(rel, mapBlock, false);
		if (!BufferIsValid(mapBuffer))
			break;

		/*
		 * We choose not to lock the page, since the result is going to be
		 * immediately stale anyway if anyone is concurrently setting or
		 * clearing bits, and we only really need an approximate value.
		 */
		map = (uint64 *) PageGetContents(BufferGetPage(mapBuffer));

		nvisible += pg_popcount_masked((const char *) map, MAPSIZE, VISIBLE_MASK8);
		if (all_frozen)
			nfrozen += pg_popcount_masked((const char *) map, MAPSIZE, FROZEN_MASK8);

		ReleaseBuffer(mapBuffer);
	}

	*all_visible = nvisible;
	if (all_frozen)
		*all_frozen = nfrozen;
}

/*
 * visibilitymap_prepare_truncate -
 *			prepare for truncation of the visibility map
 *
 * nheapblocks is the new size of the heap.
 *
 * Returns the new size of the visibility map, in blocks.
 * If it's InvalidBlockNumber, there is nothing to truncate;
 * otherwise the caller is responsible for calling smgrtruncate()
 * to truncate the visibility map pages.
 */
BlockNumber
visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks)
{
	BlockNumber newnblocks;

	/* last remaining block, byte, and bit */
	BlockNumber truncBlock = HEAPBLK_TO_MAPBLOCK(nheapblocks);
	uint32		truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks);
	uint8		truncOffset = HEAPBLK_TO_OFFSET(nheapblocks);

#ifdef TRACE_VISIBILITYMAP
	elog(DEBUG1, "vm_truncate %s %d", RelationGetRelationName(rel), nheapblocks);
#endif

	/*
	 * If no visibility map has been created yet for this relation, there's
	 * nothing to truncate.
	 */
	if (!smgrexists(RelationGetSmgr(rel), VISIBILITYMAP_FORKNUM))
		return InvalidBlockNumber;

	/*
	 * Unless the new size is exactly at a visibility map page boundary, the
	 * tail bits in the last remaining map page, representing truncated heap
	 * blocks, need to be cleared. This is not only tidy, but also necessary
	 * because we don't get a chance to clear the bits if the heap is extended
	 * again.
	 */
	if (truncByte != 0 || truncOffset != 0)
	{
		Buffer		mapBuffer;
		Page		page;
		char	   *map;

		newnblocks = truncBlock + 1;

		mapBuffer = vm_readbuf(rel, truncBlock, false);
		if (!BufferIsValid(mapBuffer))
		{
			/* nothing to do, the file was already smaller */
			return InvalidBlockNumber;
		}

		page = BufferGetPage(mapBuffer);
		map = PageGetContents(page);

		LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE);

		/* NO EREPORT(ERROR) from here till changes are logged */
		START_CRIT_SECTION();

		/* Clear out the unwanted bytes. */
		MemSet(&map[truncByte + 1], 0, MAPSIZE - (truncByte + 1));

		/*----
		 * Mask out the unwanted bits of the last remaining byte.
		 *
		 * ((1 << 0) - 1) = 00000000
		 * ((1 << 1) - 1) = 00000001
		 * ...
		 * ((1 << 6) - 1) = 00111111
		 * ((1 << 7) - 1) = 01111111
		 *----
		 */
		map[truncByte] &= (1 << truncOffset) - 1;

		/*
		 * Truncation of a relation is WAL-logged at a higher level, and we
		 * will be called at WAL replay. But if checksums are enabled, we
		 * still need to write a WAL record to protect against a torn page,
		 * in case the page is flushed to disk before the truncation WAL
		 * record. We cannot use MarkBufferDirtyHint here, because that will
		 * not dirty the page during recovery.
		 */
		MarkBufferDirty(mapBuffer);
		if (!InRecovery && RelationNeedsWAL(rel) && XLogHintBitIsNeeded())
			log_newpage_buffer(mapBuffer, false);

		END_CRIT_SECTION();

		UnlockReleaseBuffer(mapBuffer);
	}
	else
		newnblocks = truncBlock;

	if (smgrnblocks(RelationGetSmgr(rel), VISIBILITYMAP_FORKNUM) <= newnblocks)
	{
		/* nothing to do, the file was already smaller than requested size */
		return InvalidBlockNumber;
	}

	return newnblocks;
}

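/*
 * Caller-side sketch (illustrative; assumes the caller already holds
 * AccessExclusiveLock on the relation, as relation truncation does):
 *
 *		BlockNumber vm_nblocks;
 *
 *		vm_nblocks = visibilitymap_prepare_truncate(rel, nblocks);
 *		if (BlockNumberIsValid(vm_nblocks))
 *			(... include VISIBILITYMAP_FORKNUM, at size vm_nblocks, in the
 *			subsequent smgrtruncate() call ...)
 */
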
/*
 * Read a visibility map page.
 *
 * If the page doesn't exist, InvalidBuffer is returned, unless 'extend' is
 * true, in which case the visibility map file is extended to cover it.
 */
static Buffer
vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
{
	Buffer		buf;
	SMgrRelation reln;

	/*
	 * Caution: re-using this smgr pointer could fail if the relcache entry
	 * gets closed. It's safe as long as we only do smgr-level operations
	 * between here and the last use of the pointer.
	 */
	reln = RelationGetSmgr(rel);

	/*
	 * If we haven't cached the size of the visibility map fork yet, check it
	 * first.
	 */
	if (reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == InvalidBlockNumber)
	{
		if (smgrexists(reln, VISIBILITYMAP_FORKNUM))
			smgrnblocks(reln, VISIBILITYMAP_FORKNUM);
		else
			reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = 0;
	}

	/*
	 * For reading we use ZERO_ON_ERROR mode, and initialize the page if
	 * necessary. It's always safe to clear bits, so it's better to clear
	 * corrupt pages than error out.
	 *
	 * We use the same path below to initialize pages when extending the
	 * relation, as a concurrent extension can end up with vm_extend()
	 * returning an already-initialized page.
	 */
	if (blkno >= reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM])
	{
		if (extend)
			buf = vm_extend(rel, blkno + 1);
		else
			return InvalidBuffer;
	}
	else
		buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
								 RBM_ZERO_ON_ERROR, NULL);

	/*
	 * Initializing the page when needed is trickier than it looks, because of
	 * the possibility of multiple backends doing this concurrently, and our
	 * desire to not uselessly take the buffer lock in the normal path where
	 * the page is OK. We must take the lock to initialize the page, so
	 * recheck page newness after we have the lock, in case someone else
	 * already did it. Also, because we initially check PageIsNew with no
	 * lock, it's possible to fall through and return the buffer while someone
	 * else is still initializing the page (i.e., we might see pd_upper as set
	 * but other page header fields are still zeroes). This is harmless for
	 * callers that will take a buffer lock themselves, but some callers
	 * inspect the page without any lock at all. The latter is OK only so
	 * long as it doesn't depend on the page header having correct contents.
	 * Current usage is safe because PageGetContents() does not require that.
	 */
	if (PageIsNew(BufferGetPage(buf)))
	{
		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
		if (PageIsNew(BufferGetPage(buf)))
			PageInit(BufferGetPage(buf), BLCKSZ, 0);
		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
	}
	return buf;
}

/*
 * Ensure that the visibility map fork is at least vm_nblocks long, extending
 * it if necessary with zeroed pages.
 */
static Buffer
vm_extend(Relation rel, BlockNumber vm_nblocks)
{
	Buffer		buf;

	buf = ExtendBufferedRelTo(BMR_REL(rel), VISIBILITYMAP_FORKNUM, NULL,
							  EB_CREATE_FORK_IF_NEEDED |
							  EB_CLEAR_SIZE_CACHE,
							  vm_nblocks,
							  RBM_ZERO_ON_ERROR);

	/*
	 * Send a shared-inval message to force other backends to close any smgr
	 * references they may have for this rel, which we are about to change.
	 * This is a useful optimization because it means that backends don't have
	 * to keep checking for creation or extension of the file, which happens
	 * infrequently.
	 */
	CacheInvalidateSmgr(RelationGetSmgr(rel)->smgr_rlocator);

	return buf;
}