1 : /*-------------------------------------------------------------------------
2 : *
3 : * visibilitymap.c
4 : * bitmap for tracking visibility of heap tuples
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/access/heap/visibilitymap.c
12 : *
13 : * INTERFACE ROUTINES
14 : * visibilitymap_clear - clear bits for one page in the visibility map
15 : * visibilitymap_pin - pin a map page for setting a bit
16 : * visibilitymap_pin_ok - check whether correct map page is already pinned
17 : * visibilitymap_set - set bit(s) in a previously pinned page and log
18 : * visibilitymap_set_vmbits - set bit(s) in a pinned page
19 : * visibilitymap_get_status - get status of bits
20 : * visibilitymap_count - count number of bits set in visibility map
21 : * visibilitymap_prepare_truncate -
22 : * prepare for truncation of the visibility map
23 : *
24 : * NOTES
25 : *
26 : * The visibility map is a bitmap with two bits (all-visible and all-frozen)
27 : * per heap page. A set all-visible bit means that all tuples on the page are
28 : * known visible to all transactions, and therefore the page doesn't need to
29 : * be vacuumed. A set all-frozen bit means that all tuples on the page are
30 : * completely frozen, and therefore the page doesn't need to be vacuumed even
31 : * if a whole-table-scanning vacuum is required (e.g. an anti-wraparound vacuum).
32 : * The all-frozen bit must be set only when the page is already all-visible.
33 : *
34 : * The map is conservative in the sense that we make sure that whenever a bit
35 : * is set, we know the condition is true, but if a bit is not set, it might or
36 : * might not be true.
37 : *
38 : * Clearing visibility map bits is not separately WAL-logged. The callers
39 : * must make sure that whenever a bit is cleared, the bit is cleared on WAL
40 : * replay of the updating operation as well.
41 : *
42 : * When we *set* a visibility map bit during VACUUM, we must write WAL. This may
43 : * seem counterintuitive, since the bit is basically a hint: if it is clear,
44 : * it may still be the case that every tuple on the page is visible to all
45 : * transactions; we just don't know that for certain. The difficulty is that
46 : * there are two bits which are typically set together: the PD_ALL_VISIBLE bit
47 : * on the page itself, and the visibility map bit. If a crash occurs after the
48 : * visibility map page makes it to disk and before the updated heap page makes
49 : * it to disk, redo must set the bit on the heap page. Otherwise, the next
50 : * insert, update, or delete on the heap page will fail to realize that the
51 : * visibility map bit must be cleared, possibly causing index-only scans to
52 : * return wrong answers.
53 : *
54 : * VACUUM will normally skip pages for which the all-visible bit is set;
55 : * such pages can't contain any dead tuples and therefore don't need vacuuming.
56 : *
57 : * LOCKING
58 : *
59 : * In heapam.c, whenever a page is modified so that not all tuples on the
60 : * page are visible to everyone anymore, the corresponding bit in the
61 : * visibility map is cleared. In order to be crash-safe, we need to do this
62 : * while still holding a lock on the heap page and in the same critical
63 : * section that logs the page modification. However, we don't want to hold
64 : * the buffer lock over any I/O that may be required to read in the visibility
65 : * map page. To avoid this, we examine the heap page before locking it;
66 : * if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map
67 : * page. Then, we lock the buffer. But this creates a race condition: there
68 : * is a possibility that in the time it takes to lock the buffer, the
69 : * PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the
70 : * buffer, pin the visibility map page, and relock the buffer. This shouldn't
71 : * happen often, because only VACUUM currently sets visibility map bits,
72 : * and the race will only occur if VACUUM processes a given page at almost
73 : * exactly the same time that someone tries to further modify it.
74 : *
75 : * To set a bit, you need to hold a lock on the heap page. That prevents
76 : * the race condition where VACUUM sees that all tuples on the page are
77 : * visible to everyone, but another backend modifies the page before VACUUM
78 : * sets the bit in the visibility map.
79 : *
80 : * When a bit is set, the LSN of the visibility map page is updated to make
81 : * sure that the visibility map update doesn't get written to disk before the
82 : * WAL record of the changes that made it possible to set the bit is flushed.
83 : * But when a bit is cleared, we don't have to do that because it's always
84 : * safe to clear a bit in the map from a correctness point of view.
85 : *
86 : *-------------------------------------------------------------------------
87 : */
88 : #include "postgres.h"
89 :
90 : #include "access/heapam_xlog.h"
91 : #include "access/visibilitymap.h"
92 : #include "access/xloginsert.h"
93 : #include "access/xlogutils.h"
94 : #include "miscadmin.h"
95 : #include "port/pg_bitutils.h"
96 : #include "storage/bufmgr.h"
97 : #include "storage/smgr.h"
98 : #include "utils/inval.h"
99 : #include "utils/rel.h"
100 :
101 :
102 : /*#define TRACE_VISIBILITYMAP */
103 :
104 : /*
105 : * Size of the bitmap on each visibility map page, in bytes. There are no
106 : * extra headers, so the whole page minus the standard page header is
107 : * used for the bitmap.
108 : */
109 : #define MAPSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData))
110 :
111 : /* Number of heap blocks we can represent in one byte */
112 : #define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)
113 :
114 : /* Number of heap blocks we can represent in one visibility map page. */
115 : #define HEAPBLOCKS_PER_PAGE (MAPSIZE * HEAPBLOCKS_PER_BYTE)
116 :
117 : /* Mapping from heap block number to the right bit in the visibility map */
118 : #define HEAPBLK_TO_MAPBLOCK(x) ((x) / HEAPBLOCKS_PER_PAGE)
119 : #define HEAPBLK_TO_MAPBYTE(x) (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)
120 : #define HEAPBLK_TO_OFFSET(x) (((x) % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK)
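 :
 : /*----
 : * For illustration, with the standard BLCKSZ of 8192 and a 24-byte
 : * (MAXALIGN'd) page header, MAPSIZE = 8168, HEAPBLOCKS_PER_BYTE = 4 and
 : * HEAPBLOCKS_PER_PAGE = 32672. Heap block 100000 then maps to:
 : *
 : *     HEAPBLK_TO_MAPBLOCK(100000) = 100000 / 32672       = 3
 : *     HEAPBLK_TO_MAPBYTE(100000)  = (100000 % 32672) / 4 = 496
 : *     HEAPBLK_TO_OFFSET(100000)   = (100000 % 4) * 2     = 0
 : *
 : * i.e. the two bits for heap block 100000 are bits 0-1 of byte 496 of VM
 : * page 3.
 : *----
 : */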
121 :
122 : /* Masks for counting subsets of bits in the visibility map. */
123 : #define VISIBLE_MASK8 (0x55) /* The lower bit of each bit pair */
124 : #define FROZEN_MASK8 (0xaa) /* The upper bit of each bit pair */
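 :
 : /*----
 : * For illustration, a map byte of 0xd4 (binary 11 01 01 00) covers four
 : * heap blocks: one with neither bit set, two that are all-visible only,
 : * and one that is both all-visible and all-frozen. The masks pick out
 : * each subset:
 : *
 : *     0xd4 & VISIBLE_MASK8 = 0x54, popcount = 3   (all-visible blocks)
 : *     0xd4 & FROZEN_MASK8  = 0x80, popcount = 1   (all-frozen blocks)
 : *
 : * This is what visibilitymap_count() below computes with
 : * pg_popcount_masked() over whole map pages.
 : *----
 : */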
125 :
126 : /* prototypes for internal routines */
127 : static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend);
128 : static Buffer vm_extend(Relation rel, BlockNumber vm_nblocks);
129 :
130 :
131 : /*
132 : * visibilitymap_clear - clear specified bits for one page in visibility map
133 : *
134 : * You must pass a buffer containing the correct map page to this function.
135 : * Call visibilitymap_pin first to pin the right one. This function doesn't do
136 : * any I/O. Returns true if any bits have been cleared and false otherwise.
137 : */
138 : bool
139 37146 : visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
140 : {
141 37146 : BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
142 37146 : int mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
143 37146 : int mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
144 37146 : uint8 mask = flags << mapOffset;
145 : char *map;
146 37146 : bool cleared = false;
147 :
148 : /* Must never clear all_visible bit while leaving all_frozen bit set */
149 : Assert(flags & VISIBILITYMAP_VALID_BITS);
150 : Assert(flags != VISIBILITYMAP_ALL_VISIBLE);
151 :
152 : #ifdef TRACE_VISIBILITYMAP
153 : elog(DEBUG1, "vm_clear %s %d", RelationGetRelationName(rel), heapBlk);
154 : #endif
155 :
156 37146 : if (!BufferIsValid(vmbuf) || BufferGetBlockNumber(vmbuf) != mapBlock)
157 0 : elog(ERROR, "wrong buffer passed to visibilitymap_clear");
158 :
159 37146 : LockBuffer(vmbuf, BUFFER_LOCK_EXCLUSIVE);
160 37146 : map = PageGetContents(BufferGetPage(vmbuf));
161 :
162 37146 : if (map[mapByte] & mask)
163 : {
164 33016 : map[mapByte] &= ~mask;
165 :
166 33016 : MarkBufferDirty(vmbuf);
167 33016 : cleared = true;
168 : }
169 :
170 37146 : LockBuffer(vmbuf, BUFFER_LOCK_UNLOCK);
171 :
172 37146 : return cleared;
173 : }
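 :
 : /*----
 : * For illustration, a sketch of the pin-before-lock dance described under
 : * LOCKING in the header comment (variable names are hypothetical; this
 : * mirrors the shape, not the letter, of the callers in heapam.c):
 : *
 : *     Buffer  vmbuffer = InvalidBuffer;
 : *
 : *     if (PageIsAllVisible(BufferGetPage(buffer)))
 : *         visibilitymap_pin(rel, blkno, &vmbuffer);
 : *     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 : *     if (PageIsAllVisible(BufferGetPage(buffer)) &&
 : *         !BufferIsValid(vmbuffer))
 : *     {
 : *         LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 : *         visibilitymap_pin(rel, blkno, &vmbuffer);
 : *         LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 : *     }
 : *
 : * and then, inside the critical section that WAL-logs the heap change:
 : *
 : *     PageClearAllVisible(BufferGetPage(buffer));
 : *     visibilitymap_clear(rel, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
 : *----
 : */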
174 :
175 : /*
176 : * visibilitymap_pin - pin a map page for setting a bit
177 : *
178 : * Setting a bit in the visibility map is a two-phase operation. First, call
179 : * visibilitymap_pin, to pin the visibility map page containing the bit for
180 : * the heap page. Because that can require I/O to read the map page, you
181 : * shouldn't hold a lock on the heap page while doing that. Then, call
182 : * visibilitymap_set to actually set the bit.
183 : *
184 : * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by
185 : * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
186 : * relation. On return, *vmbuf is a valid buffer with the map page containing
187 : * the bit for heapBlk.
188 : *
189 : * If the page doesn't exist in the map file yet, it is extended.
190 : */
191 : void
192 1242180 : visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
193 : {
194 1242180 : BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
195 :
196 : /* Reuse the old pinned buffer if possible */
197 1242180 : if (BufferIsValid(*vmbuf))
198 : {
199 1117672 : if (BufferGetBlockNumber(*vmbuf) == mapBlock)
200 1117672 : return;
201 :
202 0 : ReleaseBuffer(*vmbuf);
203 : }
204 124508 : *vmbuf = vm_readbuf(rel, mapBlock, true);
205 : }
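 :
 : /*----
 : * For illustration, the two-phase protocol described above, roughly as a
 : * VACUUM-like caller might use it (hypothetical variables; the
 : * determination that all tuples on the page are visible is elided):
 : *
 : *     visibilitymap_pin(rel, blkno, &vmbuffer);
 : *     LockBuffer(heapbuf, BUFFER_LOCK_EXCLUSIVE);
 : *     PageSetAllVisible(BufferGetPage(heapbuf));
 : *     MarkBufferDirty(heapbuf);
 : *     visibilitymap_set(rel, blkno, heapbuf, InvalidXLogRecPtr,
 : *                       vmbuffer, cutoff_xid, VISIBILITYMAP_ALL_VISIBLE);
 : *     LockBuffer(heapbuf, BUFFER_LOCK_UNLOCK);
 : *----
 : */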
206 :
207 : /*
208 : * visibilitymap_pin_ok - do we already have the correct page pinned?
209 : *
210 : * On entry, vmbuf should be InvalidBuffer or a valid buffer returned by
211 : * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
212 : * relation. The return value indicates whether the buffer covers the
213 : * given heapBlk.
214 : */
215 : bool
216 28624 : visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf)
217 : {
218 28624 : BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
219 :
220 28624 : return BufferIsValid(vmbuf) && BufferGetBlockNumber(vmbuf) == mapBlock;
221 : }
222 :
223 : /*
224 : * visibilitymap_set - set bit(s) on a previously pinned page
225 : *
226 : * recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
227 : * or InvalidXLogRecPtr in normal running. The VM page LSN is advanced to the
228 : * one provided; in normal running, we generate a new XLOG record and set the
229 : * page LSN to that value (though the heap page's LSN may *not* be updated;
230 : * see below). cutoff_xid is the largest xmin on the page being marked
231 : * all-visible; it is needed for Hot Standby, and can be InvalidTransactionId
232 : * if the page contains no tuples. It can also be set to InvalidTransactionId
233 : * when a page that is already all-visible is being marked all-frozen.
234 : *
235 : * Caller is expected to set the heap page's PD_ALL_VISIBLE bit before calling
236 : * this function. Except in recovery, caller should also pass the heap
237 : * buffer. When checksums are enabled and we're not in recovery, we must add
238 : * the heap buffer to the WAL chain to protect it from being torn.
239 : *
240 : * You must pass a buffer containing the correct map page to this function.
241 : * Call visibilitymap_pin first to pin the right one. This function doesn't do
242 : * any I/O.
243 : *
244 : * Returns the state of the page's VM bits before setting flags.
245 : */
246 : uint8
247 79076 : visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
248 : XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid,
249 : uint8 flags)
250 : {
251 79076 : BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
252 79076 : uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
253 79076 : uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
254 : Page page;
255 : uint8 *map;
256 : uint8 status;
257 :
258 : #ifdef TRACE_VISIBILITYMAP
259 : elog(DEBUG1, "vm_set flags 0x%02X for %s %d",
260 : flags, RelationGetRelationName(rel), heapBlk);
261 : #endif
262 :
263 : Assert(InRecovery || XLogRecPtrIsInvalid(recptr));
264 : Assert(InRecovery || PageIsAllVisible(BufferGetPage(heapBuf)));
265 : Assert((flags & VISIBILITYMAP_VALID_BITS) == flags);
266 :
267 : /* Must never set all_frozen bit without also setting all_visible bit */
268 : Assert(flags != VISIBILITYMAP_ALL_FROZEN);
269 :
270 : /* Check that we have the right heap page pinned, if present */
271 79076 : if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk)
272 0 : elog(ERROR, "wrong heap buffer passed to visibilitymap_set");
273 :
274 : Assert(!BufferIsValid(heapBuf) ||
275 : BufferIsLockedByMeInMode(heapBuf, BUFFER_LOCK_EXCLUSIVE));
276 :
277 : /* Check that we have the right VM page pinned */
278 79076 : if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
279 0 : elog(ERROR, "wrong VM buffer passed to visibilitymap_set");
280 :
281 79076 : page = BufferGetPage(vmBuf);
282 79076 : map = (uint8 *) PageGetContents(page);
283 79076 : LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);
284 :
285 79076 : status = (map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS;
286 79076 : if (flags != status)
287 : {
288 79076 : START_CRIT_SECTION();
289 :
290 79076 : map[mapByte] |= (flags << mapOffset);
291 79076 : MarkBufferDirty(vmBuf);
292 :
293 79076 : if (RelationNeedsWAL(rel))
294 : {
295 75956 : if (XLogRecPtrIsInvalid(recptr))
296 : {
297 : Assert(!InRecovery);
298 67046 : recptr = log_heap_visible(rel, heapBuf, vmBuf, cutoff_xid, flags);
299 :
300 : /*
301 : * If data checksums are enabled (or wal_log_hints=on), we
302 : * need to protect the heap page from being torn.
303 : *
304 : * If not, then we must *not* update the heap page's LSN. In
305 : * this case, the FPI for the heap page was omitted from the
306 : * WAL record inserted above, so it would be incorrect to
307 : * update the heap page's LSN.
308 : */
309 67046 : if (XLogHintBitIsNeeded())
310 : {
311 60902 : Page heapPage = BufferGetPage(heapBuf);
312 :
313 60902 : PageSetLSN(heapPage, recptr);
314 : }
315 : }
316 75956 : PageSetLSN(page, recptr);
317 : }
318 :
319 79076 : END_CRIT_SECTION();
320 : }
321 :
322 79076 : LockBuffer(vmBuf, BUFFER_LOCK_UNLOCK);
323 79076 : return status;
324 : }
325 :
326 : /*
327 : * Set VM (visibility map) flags in the VM block in vmBuf.
328 : *
329 : * This function is intended for callers that log VM changes together
330 : * with the heap page modifications that rendered the page all-visible.
331 : * Callers that log VM changes separately should use visibilitymap_set().
332 : *
333 : * vmBuf must be pinned and exclusively locked, and it must cover the VM bits
334 : * corresponding to heapBlk.
335 : *
336 : * In normal operation (not recovery), this must be called inside a critical
337 : * section that also applies the necessary heap page changes and, if
338 : * applicable, emits WAL.
339 : *
340 : * The caller is responsible for ensuring consistency between the heap page
341 : * and the VM page by holding a pin and exclusive lock on the buffer
342 : * containing heapBlk.
343 : *
344 : * rlocator is used only for debugging messages.
345 : */
346 : uint8
347 35008 : visibilitymap_set_vmbits(BlockNumber heapBlk,
348 : Buffer vmBuf, uint8 flags,
349 : const RelFileLocator rlocator)
350 : {
351 35008 : BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
352 35008 : uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
353 35008 : uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
354 : Page page;
355 : uint8 *map;
356 : uint8 status;
357 :
358 : #ifdef TRACE_VISIBILITYMAP
359 : elog(DEBUG1, "vm_set flags 0x%02X for %s %d",
360 : flags,
361 : relpathbackend(rlocator, MyProcNumber, MAIN_FORKNUM).str,
362 : heapBlk);
363 : #endif
364 :
365 : /* Call in same critical section where WAL is emitted. */
366 : Assert(InRecovery || CritSectionCount > 0);
367 :
368 : /* Flags should be valid. Also never clear bits with this function */
369 : Assert((flags & VISIBILITYMAP_VALID_BITS) == flags);
370 :
371 : /* Must never set all_frozen bit without also setting all_visible bit */
372 : Assert(flags != VISIBILITYMAP_ALL_FROZEN);
373 :
374 : /* Check that we have the right VM page pinned */
375 35008 : if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
376 0 : elog(ERROR, "wrong VM buffer passed to visibilitymap_set_vmbits");
377 :
378 : Assert(BufferIsLockedByMeInMode(vmBuf, BUFFER_LOCK_EXCLUSIVE));
379 :
380 35008 : page = BufferGetPage(vmBuf);
381 35008 : map = (uint8 *) PageGetContents(page);
382 :
383 35008 : status = (map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS;
384 35008 : if (flags != status)
385 : {
386 35008 : map[mapByte] |= (flags << mapOffset);
387 35008 : MarkBufferDirty(vmBuf);
388 : }
389 :
390 35008 : return status;
391 : }
392 :
393 : /*
394 : * visibilitymap_get_status - get status of bits
395 : *
396 : * Are all tuples on heapBlk visible to all transactions, or all frozen,
397 : * according to the visibility map?
398 : *
399 : * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by an
400 : * earlier call to visibilitymap_pin or visibilitymap_get_status on the same
401 : * relation. On return, *vmbuf is a valid buffer with the map page containing
402 : * the bit for heapBlk, or InvalidBuffer. The caller is responsible for
403 : * releasing *vmbuf after it's done testing and setting bits.
404 : *
405 : * NOTE: This function is typically called without a lock on the heap page,
406 : * so somebody else could change the bit just after we look at it. In fact,
407 : * since we don't lock the visibility map page either, it's even possible that
408 : * someone else could have changed the bit just before we look at it, but yet
409 : * we might see the old value. It is the caller's responsibility to deal with
410 : * all concurrency issues!
411 : */
412 : uint8
413 7659286 : visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
414 : {
415 7659286 : BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
416 7659286 : uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
417 7659286 : uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
418 : char *map;
419 : uint8 result;
420 :
421 : #ifdef TRACE_VISIBILITYMAP
422 : elog(DEBUG1, "vm_get_status %s %d", RelationGetRelationName(rel), heapBlk);
423 : #endif
424 :
425 : /* Reuse the old pinned buffer if possible */
426 7659286 : if (BufferIsValid(*vmbuf))
427 : {
428 5922484 : if (BufferGetBlockNumber(*vmbuf) != mapBlock)
429 : {
430 0 : ReleaseBuffer(*vmbuf);
431 0 : *vmbuf = InvalidBuffer;
432 : }
433 : }
434 :
435 7659286 : if (!BufferIsValid(*vmbuf))
436 : {
437 1736802 : *vmbuf = vm_readbuf(rel, mapBlock, false);
438 1736802 : if (!BufferIsValid(*vmbuf))
439 1534046 : return (uint8) 0;
440 : }
441 :
442 6125240 : map = PageGetContents(BufferGetPage(*vmbuf));
443 :
444 : /*
445 : * A single byte read is atomic. There could be memory-ordering effects
446 : * here, but for performance reasons we make it the caller's job to worry
447 : * about that.
448 : */
449 6125240 : result = ((map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS);
450 6125240 : return result;
451 : }
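 :
 : /*----
 : * For illustration, a typical unlocked check in the spirit of an
 : * index-only scan's visibility test (hypothetical variables):
 : *
 : *     Buffer  vmbuffer = InvalidBuffer;
 : *
 : *     if (visibilitymap_get_status(rel, blkno, &vmbuffer) &
 : *         VISIBILITYMAP_ALL_VISIBLE)
 : *         ... the heap fetch can be skipped ...
 : *     if (BufferIsValid(vmbuffer))
 : *         ReleaseBuffer(vmbuffer);
 : *----
 : */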
452 :
453 : /*
454 : * visibilitymap_count - count number of bits set in visibility map
455 : *
456 : * Note: we ignore the possibility of race conditions when the table is being
457 : * extended concurrently with the call. New pages added to the table aren't
458 : * going to be marked all-visible or all-frozen, so they won't affect the result.
459 : */
460 : void
461 244772 : visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)
462 : {
463 : BlockNumber mapBlock;
464 244772 : BlockNumber nvisible = 0;
465 244772 : BlockNumber nfrozen = 0;
466 :
467 : /* all_visible must be specified */
468 : Assert(all_visible);
469 :
470 244772 : for (mapBlock = 0;; mapBlock++)
471 92324 : {
472 : Buffer mapBuffer;
473 : uint64 *map;
474 :
475 : /*
476 : * Read till we fall off the end of the map. We assume that any extra
477 : * bytes in the last page are zeroed, so we don't bother excluding
478 : * them from the count.
479 : */
480 337096 : mapBuffer = vm_readbuf(rel, mapBlock, false);
481 337096 : if (!BufferIsValid(mapBuffer))
482 244772 : break;
483 :
484 : /*
485 : * We choose not to lock the page, since the result is going to be
486 : * immediately stale anyway if anyone is concurrently setting or
487 : * clearing bits, and we only really need an approximate value.
488 : */
489 92324 : map = (uint64 *) PageGetContents(BufferGetPage(mapBuffer));
490 :
491 92324 : nvisible += pg_popcount_masked((const char *) map, MAPSIZE, VISIBLE_MASK8);
492 92324 : if (all_frozen)
493 92324 : nfrozen += pg_popcount_masked((const char *) map, MAPSIZE, FROZEN_MASK8);
494 :
495 92324 : ReleaseBuffer(mapBuffer);
496 : }
497 :
498 244772 : *all_visible = nvisible;
499 244772 : if (all_frozen)
500 244772 : *all_frozen = nfrozen;
501 244772 : }
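 :
 : /*----
 : * For illustration, a caller retrieves both counters in one pass
 : * (hypothetical variables; VACUUM uses a call of this shape when it
 : * updates pg_class.relallvisible):
 : *
 : *     BlockNumber relallvisible,
 : *                 relallfrozen;
 : *
 : *     visibilitymap_count(rel, &relallvisible, &relallfrozen);
 : *----
 : */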
502 :
503 : /*
504 : * visibilitymap_prepare_truncate -
505 : * prepare for truncation of the visibility map
506 : *
507 : * nheapblocks is the new size of the heap.
508 : *
509 : * Returns the new size of the visibility map, in blocks.
510 : * If it's InvalidBlockNumber, there is nothing to truncate;
511 : * otherwise the caller is responsible for calling smgrtruncate()
512 : * to truncate the visibility map pages.
513 : */
514 : BlockNumber
515 354 : visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks)
516 : {
517 : BlockNumber newnblocks;
518 :
519 : /* last remaining block, byte, and bit */
520 354 : BlockNumber truncBlock = HEAPBLK_TO_MAPBLOCK(nheapblocks);
521 354 : uint32 truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks);
522 354 : uint8 truncOffset = HEAPBLK_TO_OFFSET(nheapblocks);
523 :
524 : #ifdef TRACE_VISIBILITYMAP
525 : elog(DEBUG1, "vm_truncate %s %d", RelationGetRelationName(rel), nheapblocks);
526 : #endif
527 :
528 : /*
529 : * If no visibility map has been created yet for this relation, there's
530 : * nothing to truncate.
531 : */
532 354 : if (!smgrexists(RelationGetSmgr(rel), VISIBILITYMAP_FORKNUM))
533 0 : return InvalidBlockNumber;
534 :
535 : /*
536 : * Unless the new size is exactly at a visibility map page boundary, the
537 : * tail bits in the last remaining map page, representing truncated heap
538 : * blocks, need to be cleared. This is not only tidy, but also necessary
539 : * because we don't get a chance to clear the bits if the heap is extended
540 : * again.
541 : */
542 354 : if (truncByte != 0 || truncOffset != 0)
543 216 : {
544 : Buffer mapBuffer;
545 : Page page;
546 : char *map;
547 :
548 216 : newnblocks = truncBlock + 1;
549 :
550 216 : mapBuffer = vm_readbuf(rel, truncBlock, false);
551 216 : if (!BufferIsValid(mapBuffer))
552 : {
553 : /* nothing to do, the file was already smaller */
554 0 : return InvalidBlockNumber;
555 : }
556 :
557 216 : page = BufferGetPage(mapBuffer);
558 216 : map = PageGetContents(page);
559 :
560 216 : LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE);
561 :
562 : /* NO EREPORT(ERROR) from here till changes are logged */
563 216 : START_CRIT_SECTION();
564 :
565 : /* Clear out the unwanted bytes. */
566 216 : MemSet(&map[truncByte + 1], 0, MAPSIZE - (truncByte + 1));
567 :
568 : /*----
569 : * Mask out the unwanted bits of the last remaining byte.
570 : *
571 : * ((1 << 0) - 1) = 00000000
572 : * ((1 << 1) - 1) = 00000001
573 : * ...
574 : * ((1 << 6) - 1) = 00111111
575 : * ((1 << 7) - 1) = 01111111
576 : *----
577 : */
578 216 : map[truncByte] &= (1 << truncOffset) - 1;
579 :
580 : /*
581 : * Truncation of a relation is WAL-logged at a higher level, and we
582 : * will be called again at WAL replay. But if checksums are enabled, we
583 : * still need to write a WAL record to protect against a torn page, if the
584 : * page is flushed to disk before the truncation WAL record. We cannot
585 : * use MarkBufferDirtyHint here, because that will not dirty the page
586 : * during recovery.
587 : */
588 216 : MarkBufferDirty(mapBuffer);
589 216 : if (!InRecovery && RelationNeedsWAL(rel) && XLogHintBitIsNeeded())
590 166 : log_newpage_buffer(mapBuffer, false);
591 :
592 216 : END_CRIT_SECTION();
593 :
594 216 : UnlockReleaseBuffer(mapBuffer);
595 : }
596 : else
597 138 : newnblocks = truncBlock;
598 :
599 354 : if (smgrnblocks(RelationGetSmgr(rel), VISIBILITYMAP_FORKNUM) <= newnblocks)
600 : {
601 : /* nothing to do, the file was already smaller than requested size */
602 216 : return InvalidBlockNumber;
603 : }
604 :
605 138 : return newnblocks;
606 : }
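 :
 : /*----
 : * For illustration, suppose HEAPBLOCKS_PER_PAGE = 32672 (8K pages) and the
 : * heap is truncated to nheapblocks = 10. Then truncBlock = 0, truncByte = 2
 : * and truncOffset = 4, so we keep a single map page (newnblocks = 1), zero
 : * bytes 3..MAPSIZE-1 of it, and mask byte 2 with (1 << 4) - 1 = 0x0f,
 : * preserving only the bit pairs of the surviving heap blocks 8 and 9.
 : *----
 : */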
607 :
608 : /*
609 : * Read a visibility map page.
610 : *
611 : * If the page doesn't exist, InvalidBuffer is returned, unless 'extend' is
612 : * true, in which case the visibility map file is extended.
613 : */
614 : static Buffer
615 2198622 : vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
616 : {
617 : Buffer buf;
618 : SMgrRelation reln;
619 :
620 : /*
621 : * Caution: re-using this smgr pointer could fail if the relcache entry
622 : * gets closed. It's safe as long as we only do smgr-level operations
623 : * between here and the last use of the pointer.
624 : */
625 2198622 : reln = RelationGetSmgr(rel);
626 :
627 : /*
628 : * If we haven't cached the size of the visibility map fork yet, check it
629 : * first.
630 : */
631 2198622 : if (reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == InvalidBlockNumber)
632 : {
633 267774 : if (smgrexists(reln, VISIBILITYMAP_FORKNUM))
634 111622 : smgrnblocks(reln, VISIBILITYMAP_FORKNUM);
635 : else
636 156152 : reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = 0;
637 : }
638 :
639 : /*
640 : * For reading we use ZERO_ON_ERROR mode, and initialize the page if
641 : * necessary. It's always safe to clear bits, so it's better to clear
642 : * corrupt pages than error out.
643 : *
644 : * We use the same path below to initialize pages when extending the
645 : * relation, as a concurrent extension can end up with vm_extend()
646 : * returning an already-initialized page.
647 : */
648 2198622 : if (blkno >= reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM])
649 : {
650 1784804 : if (extend)
651 5986 : buf = vm_extend(rel, blkno + 1);
652 : else
653 1778818 : return InvalidBuffer;
654 : }
655 : else
656 413818 : buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
657 : RBM_ZERO_ON_ERROR, NULL);
658 :
659 : /*
660 : * Initializing the page when needed is trickier than it looks, because of
661 : * the possibility of multiple backends doing this concurrently, and our
662 : * desire to not uselessly take the buffer lock in the normal path where
663 : * the page is OK. We must take the lock to initialize the page, so
664 : * recheck page newness after we have the lock, in case someone else
665 : * already did it. Also, because we initially check PageIsNew with no
666 : * lock, it's possible to fall through and return the buffer while someone
667 : * else is still initializing the page (i.e., we might see pd_upper as set
668 : * but other page header fields are still zeroes). This is harmless for
669 : * callers that will take a buffer lock themselves, but some callers
670 : * inspect the page without any lock at all. The latter is OK only so
671 : * long as it doesn't depend on the page header having correct contents.
672 : * Current usage is safe because PageGetContents() does not require that.
673 : */
674 419804 : if (PageIsNew(BufferGetPage(buf)))
675 : {
676 6116 : LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
677 6116 : if (PageIsNew(BufferGetPage(buf)))
678 6116 : PageInit(BufferGetPage(buf), BLCKSZ, 0);
679 6116 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
680 : }
681 419804 : return buf;
682 : }
683 :
684 : /*
685 : * Ensure that the visibility map fork is at least vm_nblocks long, extending
686 : * it if necessary with zeroed pages.
687 : */
688 : static Buffer
689 5986 : vm_extend(Relation rel, BlockNumber vm_nblocks)
690 : {
691 : Buffer buf;
692 :
693 5986 : buf = ExtendBufferedRelTo(BMR_REL(rel), VISIBILITYMAP_FORKNUM, NULL,
694 : EB_CREATE_FORK_IF_NEEDED |
695 : EB_CLEAR_SIZE_CACHE,
696 : vm_nblocks,
697 : RBM_ZERO_ON_ERROR);
698 :
699 : /*
700 : * Send a shared-inval message to force other backends to close any smgr
701 : * references they may have for this rel, which we are about to change.
702 : * This is a useful optimization because it means that backends don't have
703 : * to keep checking for creation or extension of the file, which happens
704 : * infrequently.
705 : */
706 5986 : CacheInvalidateSmgr(RelationGetSmgr(rel)->smgr_rlocator);
707 :
708 5986 : return buf;
709 : }