Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * bufmgr.c
4 : * buffer manager interface routines
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/storage/buffer/bufmgr.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : /*
16 : * Principal entry points:
17 : *
18 : * ReadBuffer() -- find or create a buffer holding the requested page,
19 : * and pin it so that no one can destroy it while this process
20 : * is using it.
21 : *
22 : * StartReadBuffer() -- as above, with separate wait step
23 : * StartReadBuffers() -- multiple block version
24 : * WaitReadBuffers() -- second step of above
25 : *
26 : * ReleaseBuffer() -- unpin a buffer
27 : *
28 : * MarkBufferDirty() -- mark a pinned buffer's contents as "dirty".
29 : * The disk write is delayed until buffer replacement or checkpoint.
30 : *
31 : * See also these files:
32 : * freelist.c -- chooses victim for buffer replacement
33 : * buf_table.c -- manages the buffer lookup table
34 : */
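/*
 * Illustrative caller-side sketch (not part of bufmgr.c): the typical
 * pin -> lock -> modify -> MarkBufferDirty -> unlock/unpin cycle using the
 * entry points listed above.  The code below is a minimal example for a
 * hypothetical caller; it assumes "storage/bufmgr.h" and "utils/rel.h" are
 * included, that block 0 exists, and it omits WAL logging and error
 * handling.
 *
 *		Buffer	buf = ReadBuffer(rel, 0);			-- find/create and pin
 *
 *		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);		-- take the content lock
 *		page = BufferGetPage(buf);
 *		... modify the page (normally WAL-logged) ...
 *		MarkBufferDirty(buf);						-- disk write is deferred
 *
 *		UnlockReleaseBuffer(buf);					-- drop lock, then unpin
 */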
35 : #include "postgres.h"
36 :
37 : #include <sys/file.h>
38 : #include <unistd.h>
39 :
40 : #include "access/tableam.h"
41 : #include "access/xloginsert.h"
42 : #include "access/xlogutils.h"
43 : #ifdef USE_ASSERT_CHECKING
44 : #include "catalog/pg_tablespace_d.h"
45 : #endif
46 : #include "catalog/storage.h"
47 : #include "catalog/storage_xlog.h"
48 : #include "executor/instrument.h"
49 : #include "lib/binaryheap.h"
50 : #include "miscadmin.h"
51 : #include "pg_trace.h"
52 : #include "pgstat.h"
53 : #include "postmaster/bgwriter.h"
54 : #include "storage/aio.h"
55 : #include "storage/buf_internals.h"
56 : #include "storage/bufmgr.h"
57 : #include "storage/fd.h"
58 : #include "storage/ipc.h"
59 : #include "storage/lmgr.h"
60 : #include "storage/proc.h"
61 : #include "storage/read_stream.h"
62 : #include "storage/smgr.h"
63 : #include "storage/standby.h"
64 : #include "utils/memdebug.h"
65 : #include "utils/ps_status.h"
66 : #include "utils/rel.h"
67 : #include "utils/resowner.h"
68 : #include "utils/timestamp.h"
69 :
70 :
71 : /* Note: these two macros only work on shared buffers, not local ones! */
72 : #define BufHdrGetBlock(bufHdr) ((Block) (BufferBlocks + ((Size) (bufHdr)->buf_id) * BLCKSZ))
73 : #define BufferGetLSN(bufHdr) (PageGetLSN(BufHdrGetBlock(bufHdr)))
74 :
75 : /* Note: this macro only works on local buffers, not shared ones! */
76 : #define LocalBufHdrGetBlock(bufHdr) \
77 : LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
78 :
79 : /* Bits in SyncOneBuffer's return value */
80 : #define BUF_WRITTEN 0x01
81 : #define BUF_REUSABLE 0x02
82 :
83 : #define RELS_BSEARCH_THRESHOLD 20
84 :
85 : /*
86 : * This is the size (in number of blocks) above which we scan the
87 : * entire buffer pool to remove the buffers for all the pages of the
88 : * relation being dropped. For relations smaller than this threshold, we
89 : * find the buffers by doing lookups in the BufMapping table.
90 : */
91 : #define BUF_DROP_FULL_SCAN_THRESHOLD (uint64) (NBuffers / 32)
92 :
93 : typedef struct PrivateRefCountEntry
94 : {
95 : Buffer buffer;
96 : int32 refcount;
97 : } PrivateRefCountEntry;
98 :
99 : /* 64 bytes, about the size of a cache line on common systems */
100 : #define REFCOUNT_ARRAY_ENTRIES 8
101 :
102 : /*
103 : * Status of buffers to checkpoint for a particular tablespace, used
104 : * internally in BufferSync.
105 : */
106 : typedef struct CkptTsStatus
107 : {
108 : /* oid of the tablespace */
109 : Oid tsId;
110 :
111 : /*
112 : * Checkpoint progress for this tablespace. To make progress comparable
113 : * between tablespaces the progress is, for each tablespace, measured as a
114 : * number between 0 and the total number of to-be-checkpointed pages. Each
115 : * page checkpointed in this tablespace increments this space's progress
116 : * by progress_slice.
117 : */
118 : float8 progress;
119 : float8 progress_slice;
120 :
121 : /* number of to-be-checkpointed pages in this tablespace */
122 : int num_to_scan;
123 : /* already processed pages in this tablespace */
124 : int num_scanned;
125 :
126 : /* current offset in CkptBufferIds for this tablespace */
127 : int index;
128 : } CkptTsStatus;
129 :
130 : /*
131 : * Type for array used to sort SMgrRelations
132 : *
133 : * FlushRelationsAllBuffers shares the same comparator function with
134 : * DropRelationsAllBuffers. Pointer to this struct and RelFileLocator must be
135 : * compatible.
136 : */
137 : typedef struct SMgrSortArray
138 : {
139 : RelFileLocator rlocator; /* This must be the first member */
140 : SMgrRelation srel;
141 : } SMgrSortArray;
142 :
143 : /* GUC variables */
144 : bool zero_damaged_pages = false;
145 : int bgwriter_lru_maxpages = 100;
146 : double bgwriter_lru_multiplier = 2.0;
147 : bool track_io_timing = false;
148 :
149 : /*
150 : * How many buffers PrefetchBuffer callers should try to stay ahead of their
151 : * ReadBuffer calls by. Zero means "never prefetch". This value is only used
152 : * for buffers not belonging to tablespaces that have their
153 : * effective_io_concurrency parameter set.
154 : */
155 : int effective_io_concurrency = DEFAULT_EFFECTIVE_IO_CONCURRENCY;
156 :
157 : /*
158 : * Like effective_io_concurrency, but used by maintenance code paths that might
159 : * benefit from a higher setting because they work on behalf of many sessions.
160 : * Overridden by the tablespace setting of the same name.
161 : */
162 : int maintenance_io_concurrency = DEFAULT_MAINTENANCE_IO_CONCURRENCY;
163 :
164 : /*
165 : * Limit on how many blocks should be handled in a single I/O operation.
166 : * StartReadBuffers() callers should respect it, as should other operations
167 : * that call smgr APIs directly. It is computed as the minimum of underlying
168 : * GUCs io_combine_limit_guc and io_max_combine_limit.
169 : */
170 : int io_combine_limit = DEFAULT_IO_COMBINE_LIMIT;
171 : int io_combine_limit_guc = DEFAULT_IO_COMBINE_LIMIT;
172 : int io_max_combine_limit = DEFAULT_IO_COMBINE_LIMIT;
173 :
174 : /*
175 : * GUC variables about triggering kernel writeback for buffers written; OS
176 : * dependent defaults are set via the GUC mechanism.
177 : */
178 : int checkpoint_flush_after = DEFAULT_CHECKPOINT_FLUSH_AFTER;
179 : int bgwriter_flush_after = DEFAULT_BGWRITER_FLUSH_AFTER;
180 : int backend_flush_after = DEFAULT_BACKEND_FLUSH_AFTER;
181 :
182 : /* local state for LockBufferForCleanup */
183 : static BufferDesc *PinCountWaitBuf = NULL;
184 :
185 : /*
186 : * Backend-Private refcount management:
187 : *
188 : * Each buffer also has a private refcount that keeps track of the number of
189 : * times the buffer is pinned in the current process. This is so that the
190 : * shared refcount needs to be modified only once if a buffer is pinned more
191 : * than once by an individual backend. It's also used to check that no buffers
192 : * are still pinned at the end of transactions and when exiting.
193 : *
194 : *
195 : * To avoid - as we used to - requiring an array with NBuffers entries to keep
196 : * track of local buffers, we use a small sequentially searched array
197 : * (PrivateRefCountArray) and an overflow hash table (PrivateRefCountHash) to
198 : * keep track of backend local pins.
199 : *
200 : * As long as no more than REFCOUNT_ARRAY_ENTRIES buffers are pinned at once, all
201 : * refcounts are kept track of in the array; after that, new array entries
202 : * displace old ones into the hash table. That way a frequently used entry
203 : * can't get "stuck" in the hashtable while infrequent ones clog the array.
204 : *
205 : * Note that in most scenarios the number of pinned buffers will not exceed
206 : * REFCOUNT_ARRAY_ENTRIES.
207 : *
208 : *
209 : * To enter a buffer into the refcount tracking mechanism, first reserve a free
210 : * entry using ReservePrivateRefCountEntry() and then later, if necessary,
211 : * fill it with NewPrivateRefCountEntry(). That split lets us avoid doing
212 : * memory allocations in NewPrivateRefCountEntry() which can be important
213 : * because in some scenarios it's called with a spinlock held...
214 : */
215 : static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES];
216 : static HTAB *PrivateRefCountHash = NULL;
217 : static int32 PrivateRefCountOverflowed = 0;
218 : static uint32 PrivateRefCountClock = 0;
219 : static PrivateRefCountEntry *ReservedRefCountEntry = NULL;
220 :
221 : static uint32 MaxProportionalPins;
222 :
223 : static void ReservePrivateRefCountEntry(void);
224 : static PrivateRefCountEntry *NewPrivateRefCountEntry(Buffer buffer);
225 : static PrivateRefCountEntry *GetPrivateRefCountEntry(Buffer buffer, bool do_move);
226 : static inline int32 GetPrivateRefCount(Buffer buffer);
227 : static void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref);
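/*
 * Illustrative sketch of the reserve-then-fill protocol described above
 * (the real users are PinBuffer() and PinBuffer_Locked() below); the point
 * is the ordering: reserve while no spinlock is held, fill only once the
 * pin has actually been taken.
 *
 *		ReservePrivateRefCountEntry();		-- may allocate / touch the hash
 *		buf_state = LockBufHdr(buf_hdr);	-- now a spinlock is held
 *		... bump the shared refcount, UnlockBufHdr() ...
 *		ref = NewPrivateRefCountEntry(BufferDescriptorGetBuffer(buf_hdr));
 *		ref->refcount++;					-- allocation-free
 */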
228 :
229 : /* ResourceOwner callbacks to hold in-progress I/Os and buffer pins */
230 : static void ResOwnerReleaseBufferIO(Datum res);
231 : static char *ResOwnerPrintBufferIO(Datum res);
232 : static void ResOwnerReleaseBufferPin(Datum res);
233 : static char *ResOwnerPrintBufferPin(Datum res);
234 :
235 : const ResourceOwnerDesc buffer_io_resowner_desc =
236 : {
237 : .name = "buffer io",
238 : .release_phase = RESOURCE_RELEASE_BEFORE_LOCKS,
239 : .release_priority = RELEASE_PRIO_BUFFER_IOS,
240 : .ReleaseResource = ResOwnerReleaseBufferIO,
241 : .DebugPrint = ResOwnerPrintBufferIO
242 : };
243 :
244 : const ResourceOwnerDesc buffer_pin_resowner_desc =
245 : {
246 : .name = "buffer pin",
247 : .release_phase = RESOURCE_RELEASE_BEFORE_LOCKS,
248 : .release_priority = RELEASE_PRIO_BUFFER_PINS,
249 : .ReleaseResource = ResOwnerReleaseBufferPin,
250 : .DebugPrint = ResOwnerPrintBufferPin
251 : };
252 :
253 : /*
254 : * Ensure that the PrivateRefCountArray has sufficient space to store one more
255 : * entry. This has to be called before using NewPrivateRefCountEntry() to fill
256 : * a new entry - but it's perfectly fine to not use a reserved entry.
257 : */
258 : static void
259 127577272 : ReservePrivateRefCountEntry(void)
260 : {
261 : /* Already reserved (or freed), nothing to do */
262 127577272 : if (ReservedRefCountEntry != NULL)
263 119053998 : return;
264 :
265 : /*
266 : * First search for a free entry in the array; that'll be sufficient in the
267 : * majority of cases.
268 : */
269 : {
270 : int i;
271 :
272 21627172 : for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
273 : {
274 : PrivateRefCountEntry *res;
275 :
276 21280548 : res = &PrivateRefCountArray[i];
277 :
278 21280548 : if (res->buffer == InvalidBuffer)
279 : {
280 8176650 : ReservedRefCountEntry = res;
281 8176650 : return;
282 : }
283 : }
284 : }
285 :
286 : /*
287 : * No luck. All array entries are full. Move one array entry into the hash
288 : * table.
289 : */
290 : {
291 : /*
292 : * Move entry from the current clock position in the array into the
293 : * hashtable. Use that slot.
294 : */
295 : PrivateRefCountEntry *hashent;
296 : bool found;
297 :
298 : /* select victim slot */
299 346624 : ReservedRefCountEntry =
300 346624 : &PrivateRefCountArray[PrivateRefCountClock++ % REFCOUNT_ARRAY_ENTRIES];
301 :
302 : /* Better be used, otherwise we shouldn't get here. */
303 : Assert(ReservedRefCountEntry->buffer != InvalidBuffer);
304 :
305 : /* enter victim array entry into hashtable */
306 346624 : hashent = hash_search(PrivateRefCountHash,
307 346624 : &(ReservedRefCountEntry->buffer),
308 : HASH_ENTER,
309 : &found);
310 : Assert(!found);
311 346624 : hashent->refcount = ReservedRefCountEntry->refcount;
312 :
313 : /* clear the now free array slot */
314 346624 : ReservedRefCountEntry->buffer = InvalidBuffer;
315 346624 : ReservedRefCountEntry->refcount = 0;
316 :
317 346624 : PrivateRefCountOverflowed++;
318 : }
319 : }
320 :
321 : /*
322 : * Fill a previously reserved refcount entry.
323 : */
324 : static PrivateRefCountEntry *
325 116152618 : NewPrivateRefCountEntry(Buffer buffer)
326 : {
327 : PrivateRefCountEntry *res;
328 :
329 : /* only allowed to be called when a reservation has been made */
330 : Assert(ReservedRefCountEntry != NULL);
331 :
332 : /* use up the reserved entry */
333 116152618 : res = ReservedRefCountEntry;
334 116152618 : ReservedRefCountEntry = NULL;
335 :
336 : /* and fill it */
337 116152618 : res->buffer = buffer;
338 116152618 : res->refcount = 0;
339 :
340 116152618 : return res;
341 : }
342 :
343 : /*
344 : * Return the PrivateRefCount entry for the passed buffer.
345 : *
346 : * Returns NULL if a buffer doesn't have a refcount entry. Otherwise, if
347 : * do_move is true and the entry resides in the hashtable, the entry is
348 : * moved into the array to optimize it for frequent access.
349 : */
350 : static PrivateRefCountEntry *
351 286022092 : GetPrivateRefCountEntry(Buffer buffer, bool do_move)
352 : {
353 : PrivateRefCountEntry *res;
354 : int i;
355 :
356 : Assert(BufferIsValid(buffer));
357 : Assert(!BufferIsLocal(buffer));
358 :
359 : /*
360 : * First search for references in the array, that'll be sufficient in the
361 : * majority of cases.
362 : */
363 1352512774 : for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
364 : {
365 1240353974 : res = &PrivateRefCountArray[i];
366 :
367 1240353974 : if (res->buffer == buffer)
368 173863292 : return res;
369 : }
370 :
371 : /*
372 : * By here we know that the buffer, if already pinned, isn't residing in
373 : * the array.
374 : *
375 : * Only look up the buffer in the hashtable if we've previously overflowed
376 : * into it.
377 : */
378 112158800 : if (PrivateRefCountOverflowed == 0)
379 111294998 : return NULL;
380 :
381 863802 : res = hash_search(PrivateRefCountHash, &buffer, HASH_FIND, NULL);
382 :
383 863802 : if (res == NULL)
384 513786 : return NULL;
385 350016 : else if (!do_move)
386 : {
387 : /* caller doesn't want us to move the hash entry into the array */
388 310906 : return res;
389 : }
390 : else
391 : {
392 : /* move buffer from hashtable into the free array slot */
393 : bool found;
394 : PrivateRefCountEntry *free;
395 :
396 : /* Ensure there's a free array slot */
397 39110 : ReservePrivateRefCountEntry();
398 :
399 : /* Use up the reserved slot */
400 : Assert(ReservedRefCountEntry != NULL);
401 39110 : free = ReservedRefCountEntry;
402 39110 : ReservedRefCountEntry = NULL;
403 : Assert(free->buffer == InvalidBuffer);
404 :
405 : /* and fill it */
406 39110 : free->buffer = buffer;
407 39110 : free->refcount = res->refcount;
408 :
409 : /* delete from hashtable */
410 39110 : hash_search(PrivateRefCountHash, &buffer, HASH_REMOVE, &found);
411 : Assert(found);
412 : Assert(PrivateRefCountOverflowed > 0);
413 39110 : PrivateRefCountOverflowed--;
414 :
415 39110 : return free;
416 : }
417 : }
418 :
419 : /*
420 : * Returns how many times the passed buffer is pinned by this backend.
421 : *
422 : * Only works for shared memory buffers!
423 : */
424 : static inline int32
425 5110294 : GetPrivateRefCount(Buffer buffer)
426 : {
427 : PrivateRefCountEntry *ref;
428 :
429 : Assert(BufferIsValid(buffer));
430 : Assert(!BufferIsLocal(buffer));
431 :
432 : /*
433 : * Not moving the entry - that's ok for the current users, but we might
434 : * want to change this one day.
435 : */
436 5110294 : ref = GetPrivateRefCountEntry(buffer, false);
437 :
438 5110294 : if (ref == NULL)
439 8832 : return 0;
440 5101462 : return ref->refcount;
441 : }
442 :
443 : /*
444 : * Release resources used to track the reference count of a buffer which we no
445 : * longer have pinned and don't want to pin again immediately.
446 : */
447 : static void
448 116152618 : ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
449 : {
450 : Assert(ref->refcount == 0);
451 :
452 116152618 : if (ref >= &PrivateRefCountArray[0] &&
453 : ref < &PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES])
454 : {
455 115845104 : ref->buffer = InvalidBuffer;
456 :
457 : /*
458 : * Mark the just used entry as reserved - in many scenarios that
459 : * allows us to avoid ever having to search the array/hash for free
460 : * entries.
461 : */
462 115845104 : ReservedRefCountEntry = ref;
463 : }
464 : else
465 : {
466 : bool found;
467 307514 : Buffer buffer = ref->buffer;
468 :
469 307514 : hash_search(PrivateRefCountHash, &buffer, HASH_REMOVE, &found);
470 : Assert(found);
471 : Assert(PrivateRefCountOverflowed > 0);
472 307514 : PrivateRefCountOverflowed--;
473 : }
474 116152618 : }
475 :
476 : /*
477 : * BufferIsPinned
478 : * True iff the buffer is pinned (also checks for valid buffer number).
479 : *
480 : * NOTE: what we check here is that *this* backend holds a pin on
481 : * the buffer. We do not care whether some other backend does.
482 : */
483 : #define BufferIsPinned(bufnum) \
484 : ( \
485 : !BufferIsValid(bufnum) ? \
486 : false \
487 : : \
488 : BufferIsLocal(bufnum) ? \
489 : (LocalRefCount[-(bufnum) - 1] > 0) \
490 : : \
491 : (GetPrivateRefCount(bufnum) > 0) \
492 : )
493 :
494 :
495 : static Buffer ReadBuffer_common(Relation rel,
496 : SMgrRelation smgr, char smgr_persistence,
497 : ForkNumber forkNum, BlockNumber blockNum,
498 : ReadBufferMode mode, BufferAccessStrategy strategy);
499 : static BlockNumber ExtendBufferedRelCommon(BufferManagerRelation bmr,
500 : ForkNumber fork,
501 : BufferAccessStrategy strategy,
502 : uint32 flags,
503 : uint32 extend_by,
504 : BlockNumber extend_upto,
505 : Buffer *buffers,
506 : uint32 *extended_by);
507 : static BlockNumber ExtendBufferedRelShared(BufferManagerRelation bmr,
508 : ForkNumber fork,
509 : BufferAccessStrategy strategy,
510 : uint32 flags,
511 : uint32 extend_by,
512 : BlockNumber extend_upto,
513 : Buffer *buffers,
514 : uint32 *extended_by);
515 : static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy);
516 : static void PinBuffer_Locked(BufferDesc *buf);
517 : static void UnpinBuffer(BufferDesc *buf);
518 : static void UnpinBufferNoOwner(BufferDesc *buf);
519 : static void BufferSync(int flags);
520 : static uint32 WaitBufHdrUnlocked(BufferDesc *buf);
521 : static int SyncOneBuffer(int buf_id, bool skip_recently_used,
522 : WritebackContext *wb_context);
523 : static void WaitIO(BufferDesc *buf);
524 : static void AbortBufferIO(Buffer buffer);
525 : static void shared_buffer_write_error_callback(void *arg);
526 : static void local_buffer_write_error_callback(void *arg);
527 : static inline BufferDesc *BufferAlloc(SMgrRelation smgr,
528 : char relpersistence,
529 : ForkNumber forkNum,
530 : BlockNumber blockNum,
531 : BufferAccessStrategy strategy,
532 : bool *foundPtr, IOContext io_context);
533 : static bool AsyncReadBuffers(ReadBuffersOperation *operation, int *nblocks_progress);
534 : static void CheckReadBuffersOperation(ReadBuffersOperation *operation, bool is_complete);
535 : static Buffer GetVictimBuffer(BufferAccessStrategy strategy, IOContext io_context);
536 : static void FlushBuffer(BufferDesc *buf, SMgrRelation reln,
537 : IOObject io_object, IOContext io_context);
538 : static void FindAndDropRelationBuffers(RelFileLocator rlocator,
539 : ForkNumber forkNum,
540 : BlockNumber nForkBlock,
541 : BlockNumber firstDelBlock);
542 : static void RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
543 : RelFileLocator dstlocator,
544 : ForkNumber forkNum, bool permanent);
545 : static void AtProcExit_Buffers(int code, Datum arg);
546 : static void CheckForBufferLeaks(void);
547 : #ifdef USE_ASSERT_CHECKING
548 : static void AssertNotCatalogBufferLock(LWLock *lock, LWLockMode mode,
549 : void *unused_context);
550 : #endif
551 : static int rlocator_comparator(const void *p1, const void *p2);
552 : static inline int buffertag_comparator(const BufferTag *ba, const BufferTag *bb);
553 : static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
554 : static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg);
555 :
556 :
557 : /*
558 : * Implementation of PrefetchBuffer() for shared buffers.
559 : */
560 : PrefetchBufferResult
561 62256 : PrefetchSharedBuffer(SMgrRelation smgr_reln,
562 : ForkNumber forkNum,
563 : BlockNumber blockNum)
564 : {
565 62256 : PrefetchBufferResult result = {InvalidBuffer, false};
566 : BufferTag newTag; /* identity of requested block */
567 : uint32 newHash; /* hash value for newTag */
568 : LWLock *newPartitionLock; /* buffer partition lock for it */
569 : int buf_id;
570 :
571 : Assert(BlockNumberIsValid(blockNum));
572 :
573 : /* create a tag so we can lookup the buffer */
574 62256 : InitBufferTag(&newTag, &smgr_reln->smgr_rlocator.locator,
575 : forkNum, blockNum);
576 :
577 : /* determine its hash code and partition lock ID */
578 62256 : newHash = BufTableHashCode(&newTag);
579 62256 : newPartitionLock = BufMappingPartitionLock(newHash);
580 :
581 : /* see if the block is in the buffer pool already */
582 62256 : LWLockAcquire(newPartitionLock, LW_SHARED);
583 62256 : buf_id = BufTableLookup(&newTag, newHash);
584 62256 : LWLockRelease(newPartitionLock);
585 :
586 : /* If not in buffers, initiate prefetch */
587 62256 : if (buf_id < 0)
588 : {
589 : #ifdef USE_PREFETCH
590 : /*
591 : * Try to initiate an asynchronous read. This returns false in
592 : * recovery if the relation file doesn't exist.
593 : */
594 34734 : if ((io_direct_flags & IO_DIRECT_DATA) == 0 &&
595 17144 : smgrprefetch(smgr_reln, forkNum, blockNum, 1))
596 : {
597 17144 : result.initiated_io = true;
598 : }
599 : #endif /* USE_PREFETCH */
600 : }
601 : else
602 : {
603 : /*
604 : * Report the buffer it was in at that time. The caller may be able
605 : * to avoid a buffer table lookup, but it's not pinned and it must be
606 : * rechecked!
607 : */
608 44666 : result.recent_buffer = buf_id + 1;
609 : }
610 :
611 : /*
612 : * If the block *is* in buffers, we do nothing. This is not really ideal:
613 : * the block might be just about to be evicted, which would be stupid
614 : * since we know we are going to need it soon. But the only easy answer
615 : * is to bump the usage_count, which does not seem like a great solution:
616 : * when the caller does ultimately touch the block, usage_count would get
617 : * bumped again, resulting in too much favoritism for blocks that are
618 : * involved in a prefetch sequence. A real fix would involve some
619 : * additional per-buffer state, and it's not clear that there's enough of
620 : * a problem to justify that.
621 : */
622 :
623 62256 : return result;
624 : }
625 :
626 : /*
627 : * PrefetchBuffer -- initiate asynchronous read of a block of a relation
628 : *
629 : * This is named by analogy to ReadBuffer but doesn't actually allocate a
630 : * buffer. Instead it tries to ensure that a future ReadBuffer for the given
631 : * block will not be delayed by the I/O. Prefetching is optional.
632 : *
633 : * There are three possible outcomes:
634 : *
635 : * 1. If the block is already cached, the result includes a valid buffer that
636 : * could be used by the caller to avoid the need for a later buffer lookup, but
637 : * it's not pinned, so the caller must recheck it.
638 : *
639 : * 2. If the kernel has been asked to initiate I/O, the initiated_io member is
640 : * true. Currently there is no way to know if the data was already cached by
641 : * the kernel and therefore didn't really initiate I/O, and no way to know when
642 : * the I/O completes other than using synchronous ReadBuffer().
643 : *
644 : * 3. Otherwise, the buffer wasn't already cached by PostgreSQL, and either
645 : * USE_PREFETCH is not defined (this build doesn't support prefetching due to
646 : * lack of a kernel facility), direct I/O is enabled, or the underlying
647 : * relation file wasn't found and we are in recovery. (If the relation file
648 : * wasn't found and we are not in recovery, an error is raised).
649 : */
650 : PrefetchBufferResult
651 41182 : PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
652 : {
653 : Assert(RelationIsValid(reln));
654 : Assert(BlockNumberIsValid(blockNum));
655 :
656 41182 : if (RelationUsesLocalBuffers(reln))
657 : {
658 : /* see comments in ReadBufferExtended */
659 1566 : if (RELATION_IS_OTHER_TEMP(reln))
660 0 : ereport(ERROR,
661 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
662 : errmsg("cannot access temporary tables of other sessions")));
663 :
664 : /* pass it off to localbuf.c */
665 1566 : return PrefetchLocalBuffer(RelationGetSmgr(reln), forkNum, blockNum);
666 : }
667 : else
668 : {
669 : /* pass it to the shared buffer version */
670 39616 : return PrefetchSharedBuffer(RelationGetSmgr(reln), forkNum, blockNum);
671 : }
672 : }
673 :
674 : /*
675 : * ReadRecentBuffer -- try to pin a block in a recently observed buffer
676 : *
677 : * Compared to ReadBuffer(), this avoids a buffer mapping lookup when it's
678 : * successful. Return true if the buffer is valid and still has the expected
679 : * tag. In that case, the buffer is pinned and the usage count is bumped.
680 : */
681 : bool
682 8846 : ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum,
683 : Buffer recent_buffer)
684 : {
685 : BufferDesc *bufHdr;
686 : BufferTag tag;
687 : uint32 buf_state;
688 : bool have_private_ref;
689 :
690 : Assert(BufferIsValid(recent_buffer));
691 :
692 8846 : ResourceOwnerEnlarge(CurrentResourceOwner);
693 8846 : ReservePrivateRefCountEntry();
694 8846 : InitBufferTag(&tag, &rlocator, forkNum, blockNum);
695 :
696 8846 : if (BufferIsLocal(recent_buffer))
697 : {
698 64 : int b = -recent_buffer - 1;
699 :
700 64 : bufHdr = GetLocalBufferDescriptor(b);
701 64 : buf_state = pg_atomic_read_u32(&bufHdr->state);
702 :
703 : /* Is it still valid and holding the right tag? */
704 64 : if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
705 : {
706 64 : PinLocalBuffer(bufHdr, true);
707 :
708 64 : pgBufferUsage.local_blks_hit++;
709 :
710 64 : return true;
711 : }
712 : }
713 : else
714 : {
715 8782 : bufHdr = GetBufferDescriptor(recent_buffer - 1);
716 8782 : have_private_ref = GetPrivateRefCount(recent_buffer) > 0;
717 :
718 : /*
719 : * Do we already have this buffer pinned with a private reference? If
720 : * so, it must be valid and it is safe to check the tag without
721 : * locking. If not, we have to lock the header first and then check.
722 : */
723 8782 : if (have_private_ref)
724 0 : buf_state = pg_atomic_read_u32(&bufHdr->state);
725 : else
726 8782 : buf_state = LockBufHdr(bufHdr);
727 :
728 8782 : if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
729 : {
730 : /*
731 : * It's now safe to pin the buffer. We can't pin first and ask
732 : * questions later, because it might confuse code paths like
733 : * InvalidateBuffer() if we pinned a random non-matching buffer.
734 : */
735 8696 : if (have_private_ref)
736 0 : PinBuffer(bufHdr, NULL); /* bump pin count */
737 : else
738 8696 : PinBuffer_Locked(bufHdr); /* pin for first time */
739 :
740 8696 : pgBufferUsage.shared_blks_hit++;
741 :
742 8696 : return true;
743 : }
744 :
745 : /* If we locked the header above, now unlock. */
746 86 : if (!have_private_ref)
747 86 : UnlockBufHdr(bufHdr, buf_state);
748 : }
749 :
750 86 : return false;
751 : }
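/*
 * Illustrative caller-side sketch (not part of bufmgr.c, helper name is
 * hypothetical): combining PrefetchBuffer()'s recent_buffer hint with
 * ReadRecentBuffer().  The hint is unpinned and must be re-verified; if it
 * no longer matches, fall back to a normal ReadBuffer().  Assumes
 * "storage/bufmgr.h" and "utils/rel.h".
 */
static Buffer
prefetch_then_read(Relation rel, BlockNumber blkno)
{
	PrefetchBufferResult pf;

	pf = PrefetchBuffer(rel, MAIN_FORKNUM, blkno);

	/* ... do other useful work while the kernel (maybe) reads the block ... */

	if (BufferIsValid(pf.recent_buffer) &&
		ReadRecentBuffer(rel->rd_locator, MAIN_FORKNUM, blkno,
						 pf.recent_buffer))
		return pf.recent_buffer;	/* hint still valid; buffer is now pinned */

	return ReadBuffer(rel, blkno);	/* normal buffer mapping lookup */
}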
752 :
753 : /*
754 : * ReadBuffer -- a shorthand for ReadBufferExtended, for reading from main
755 : * fork with RBM_NORMAL mode and default strategy.
756 : */
757 : Buffer
758 85738580 : ReadBuffer(Relation reln, BlockNumber blockNum)
759 : {
760 85738580 : return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
761 : }
762 :
763 : /*
764 : * ReadBufferExtended -- returns a buffer containing the requested
765 : * block of the requested relation. If the blknum
766 : * requested is P_NEW, extend the relation file and
767 : * allocate a new block. (Caller is responsible for
768 : * ensuring that only one backend tries to extend a
769 : * relation at the same time!)
770 : *
771 : * Returns: the buffer number for the buffer containing
772 : * the block read. The returned buffer has been pinned.
773 : * Does not return on error --- elog's instead.
774 : *
775 : * Assume when this function is called, that reln has been opened already.
776 : *
777 : * In RBM_NORMAL mode, the page is read from disk, and the page header is
778 : * validated. An error is thrown if the page header is not valid. (But
779 : * note that an all-zero page is considered "valid"; see
780 : * PageIsVerified().)
781 : *
782 : * RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not
783 : * valid, the page is zeroed instead of throwing an error. This is intended
784 : * for non-critical data, where the caller is prepared to repair errors.
785 : *
786 : * In RBM_ZERO_AND_LOCK mode, if the page isn't in buffer cache already, it's
787 : * filled with zeros instead of reading it from disk. Useful when the caller
788 : * is going to fill the page from scratch, since this saves I/O and avoids
789 : * unnecessary failure if the page-on-disk has corrupt page headers.
790 : * The page is returned locked to ensure that the caller has a chance to
791 : * initialize the page before it's made visible to others.
792 : * Caution: do not use this mode to read a page that is beyond the relation's
793 : * current physical EOF; that is likely to cause problems in md.c when
794 : * the page is modified and written out. P_NEW is OK, though.
795 : *
796 : * RBM_ZERO_AND_CLEANUP_LOCK is the same as RBM_ZERO_AND_LOCK, but acquires
797 : * a cleanup-strength lock on the page.
798 : *
799 : * RBM_NORMAL_NO_LOG mode is treated the same as RBM_NORMAL here.
800 : *
801 : * If strategy is not NULL, a nondefault buffer access strategy is used.
802 : * See buffer/README for details.
803 : */
804 : inline Buffer
805 102983750 : ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
806 : ReadBufferMode mode, BufferAccessStrategy strategy)
807 : {
808 : Buffer buf;
809 :
810 : /*
811 : * Reject attempts to read non-local temporary relations; we would be
812 : * likely to get wrong data since we have no visibility into the owning
813 : * session's local buffers.
814 : */
815 102983750 : if (RELATION_IS_OTHER_TEMP(reln))
816 0 : ereport(ERROR,
817 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
818 : errmsg("cannot access temporary tables of other sessions")));
819 :
820 : /*
821 : * Read the buffer, and update pgstat counters to reflect a cache hit or
822 : * miss.
823 : */
824 102983750 : buf = ReadBuffer_common(reln, RelationGetSmgr(reln), 0,
825 : forkNum, blockNum, mode, strategy);
826 :
827 102983704 : return buf;
828 : }
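/*
 * Illustrative caller-side sketch (not part of bufmgr.c, helper name is
 * hypothetical): a sequential pass using ReadBufferExtended() with a
 * nondefault BAS_BULKREAD strategy, so that a large scan recycles a small
 * ring of buffers instead of flooding shared_buffers.  Assumes
 * "storage/bufmgr.h" and "utils/rel.h"; the per-page work is elided.
 */
static void
bulk_scan_relation(Relation rel)
{
	BufferAccessStrategy strategy = GetAccessStrategy(BAS_BULKREAD);
	BlockNumber nblocks = RelationGetNumberOfBlocks(rel);

	for (BlockNumber blkno = 0; blkno < nblocks; blkno++)
	{
		Buffer		buf;

		buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
								 strategy);
		LockBuffer(buf, BUFFER_LOCK_SHARE);
		/* ... inspect BufferGetPage(buf) ... */
		UnlockReleaseBuffer(buf);
	}

	FreeAccessStrategy(strategy);
}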
829 :
830 :
831 : /*
832 : * ReadBufferWithoutRelcache -- like ReadBufferExtended, but doesn't require
833 : * a relcache entry for the relation.
834 : *
835 : * Pass permanent = true for a RELPERSISTENCE_PERMANENT relation, and
836 : * permanent = false for a RELPERSISTENCE_UNLOGGED relation. This function
837 : * cannot be used for temporary relations (and making that work might be
838 : * difficult, unless we only want to read temporary relations for our own
839 : * ProcNumber).
840 : */
841 : Buffer
842 11326564 : ReadBufferWithoutRelcache(RelFileLocator rlocator, ForkNumber forkNum,
843 : BlockNumber blockNum, ReadBufferMode mode,
844 : BufferAccessStrategy strategy, bool permanent)
845 : {
846 11326564 : SMgrRelation smgr = smgropen(rlocator, INVALID_PROC_NUMBER);
847 :
848 11326564 : return ReadBuffer_common(NULL, smgr,
849 : permanent ? RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED,
850 : forkNum, blockNum,
851 : mode, strategy);
852 : }
853 :
854 : /*
855 : * Convenience wrapper around ExtendBufferedRelBy() extending by one block.
856 : */
857 : Buffer
858 92880 : ExtendBufferedRel(BufferManagerRelation bmr,
859 : ForkNumber forkNum,
860 : BufferAccessStrategy strategy,
861 : uint32 flags)
862 : {
863 : Buffer buf;
864 92880 : uint32 extend_by = 1;
865 :
866 92880 : ExtendBufferedRelBy(bmr, forkNum, strategy, flags, extend_by,
867 : &buf, &extend_by);
868 :
869 92880 : return buf;
870 : }
871 :
872 : /*
873 : * Extend relation by multiple blocks.
874 : *
875 : * Tries to extend the relation by extend_by blocks. Depending on the
876 : * availability of resources the relation may end up being extended by a
877 : * smaller number of pages (unless an error is thrown, always by at least one
878 : * page). *extended_by is updated to the number of pages the relation has
879 : * actually been extended by.
880 : *
881 : * buffers needs to be an array that is at least extend_by long. Upon
882 : * completion, the first extend_by array elements will point to a pinned
883 : * buffer.
884 : *
885 : * If EB_LOCK_FIRST is part of flags, the first returned buffer is
886 : * locked. This is useful for callers that want a buffer that is guaranteed to
887 : * be empty.
888 : */
889 : BlockNumber
890 321188 : ExtendBufferedRelBy(BufferManagerRelation bmr,
891 : ForkNumber fork,
892 : BufferAccessStrategy strategy,
893 : uint32 flags,
894 : uint32 extend_by,
895 : Buffer *buffers,
896 : uint32 *extended_by)
897 : {
898 : Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
899 : Assert(bmr.smgr == NULL || bmr.relpersistence != 0);
900 : Assert(extend_by > 0);
901 :
902 321188 : if (bmr.smgr == NULL)
903 : {
904 321188 : bmr.smgr = RelationGetSmgr(bmr.rel);
905 321188 : bmr.relpersistence = bmr.rel->rd_rel->relpersistence;
906 : }
907 :
908 321188 : return ExtendBufferedRelCommon(bmr, fork, strategy, flags,
909 : extend_by, InvalidBlockNumber,
910 : buffers, extended_by);
911 : }
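/*
 * Illustrative caller-side sketch (not part of bufmgr.c, helper name is
 * hypothetical): appending one new, empty page with ExtendBufferedRel() and
 * EB_LOCK_FIRST, so the page comes back pinned and exclusively locked before
 * any other backend can see it.  Assumes "storage/bufmgr.h",
 * "storage/bufpage.h" and "utils/rel.h"; WAL logging is omitted.
 */
static Buffer
append_empty_page(Relation rel)
{
	Buffer		buf;

	buf = ExtendBufferedRel(BMR_REL(rel), MAIN_FORKNUM, NULL, EB_LOCK_FIRST);

	/* the new page is all zeroes; initialize it while we hold the lock */
	PageInit(BufferGetPage(buf), BufferGetPageSize(buf), 0);
	MarkBufferDirty(buf);

	return buf;					/* caller unlocks and unpins when done */
}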
912 :
913 : /*
914 : * Extend the relation so it is at least extend_to blocks large; return the
915 : * buffer for block (extend_to - 1).
916 : *
917 : * This is useful for callers that want to write a specific page, regardless
918 : * of the current size of the relation (e.g. useful for visibilitymap and for
919 : * crash recovery).
920 : */
921 : Buffer
922 102542 : ExtendBufferedRelTo(BufferManagerRelation bmr,
923 : ForkNumber fork,
924 : BufferAccessStrategy strategy,
925 : uint32 flags,
926 : BlockNumber extend_to,
927 : ReadBufferMode mode)
928 : {
929 : BlockNumber current_size;
930 102542 : uint32 extended_by = 0;
931 102542 : Buffer buffer = InvalidBuffer;
932 : Buffer buffers[64];
933 :
934 : Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
935 : Assert(bmr.smgr == NULL || bmr.relpersistence != 0);
936 : Assert(extend_to != InvalidBlockNumber && extend_to > 0);
937 :
938 102542 : if (bmr.smgr == NULL)
939 : {
940 14246 : bmr.smgr = RelationGetSmgr(bmr.rel);
941 14246 : bmr.relpersistence = bmr.rel->rd_rel->relpersistence;
942 : }
943 :
944 : /*
945 : * If desired, create the file if it doesn't exist. If
946 : * smgr_cached_nblocks[fork] is positive then it must exist, no need for
947 : * an smgrexists call.
948 : */
949 102542 : if ((flags & EB_CREATE_FORK_IF_NEEDED) &&
950 14246 : (bmr.smgr->smgr_cached_nblocks[fork] == 0 ||
951 36 : bmr.smgr->smgr_cached_nblocks[fork] == InvalidBlockNumber) &&
952 14210 : !smgrexists(bmr.smgr, fork))
953 : {
954 14178 : LockRelationForExtension(bmr.rel, ExclusiveLock);
955 :
956 : /* recheck, fork might have been created concurrently */
957 14178 : if (!smgrexists(bmr.smgr, fork))
958 14172 : smgrcreate(bmr.smgr, fork, flags & EB_PERFORMING_RECOVERY);
959 :
960 14178 : UnlockRelationForExtension(bmr.rel, ExclusiveLock);
961 : }
962 :
963 : /*
964 : * If requested, invalidate size cache, so that smgrnblocks asks the
965 : * kernel.
966 : */
967 102542 : if (flags & EB_CLEAR_SIZE_CACHE)
968 14246 : bmr.smgr->smgr_cached_nblocks[fork] = InvalidBlockNumber;
969 :
970 : /*
971 : * Estimate how many pages we'll need to extend by. This avoids acquiring
972 : * unnecessarily many victim buffers.
973 : */
974 102542 : current_size = smgrnblocks(bmr.smgr, fork);
975 :
976 : /*
977 : * Since no-one else can be looking at the page contents yet, there is no
978 : * difference between an exclusive lock and a cleanup-strength lock. Note
979 : * that we pass the original mode to ReadBuffer_common() below, when
980 : * falling back to reading the buffer due to a concurrent relation extension.
981 : */
982 102542 : if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
983 87568 : flags |= EB_LOCK_TARGET;
984 :
985 209260 : while (current_size < extend_to)
986 : {
987 106718 : uint32 num_pages = lengthof(buffers);
988 : BlockNumber first_block;
989 :
990 106718 : if ((uint64) current_size + num_pages > extend_to)
991 106586 : num_pages = extend_to - current_size;
992 :
993 106718 : first_block = ExtendBufferedRelCommon(bmr, fork, strategy, flags,
994 : num_pages, extend_to,
995 : buffers, &extended_by);
996 :
997 106718 : current_size = first_block + extended_by;
998 : Assert(num_pages != 0 || current_size >= extend_to);
999 :
1000 227956 : for (uint32 i = 0; i < extended_by; i++)
1001 : {
1002 121238 : if (first_block + i != extend_to - 1)
1003 18712 : ReleaseBuffer(buffers[i]);
1004 : else
1005 102526 : buffer = buffers[i];
1006 : }
1007 : }
1008 :
1009 : /*
1010 : * It's possible that another backend concurrently extended the relation.
1011 : * In that case read the buffer.
1012 : *
1013 : * XXX: Should we control this via a flag?
1014 : */
1015 102542 : if (buffer == InvalidBuffer)
1016 : {
1017 : Assert(extended_by == 0);
1018 16 : buffer = ReadBuffer_common(bmr.rel, bmr.smgr, bmr.relpersistence,
1019 : fork, extend_to - 1, mode, strategy);
1020 : }
1021 :
1022 102542 : return buffer;
1023 : }
1024 :
1025 : /*
1026 : * Lock and optionally zero a buffer, as part of the implementation of
1027 : * RBM_ZERO_AND_LOCK or RBM_ZERO_AND_CLEANUP_LOCK. The buffer must be already
1028 : * pinned. If the buffer is not already valid, it is zeroed and made valid.
1029 : */
1030 : static void
1031 624566 : ZeroAndLockBuffer(Buffer buffer, ReadBufferMode mode, bool already_valid)
1032 : {
1033 : BufferDesc *bufHdr;
1034 : bool need_to_zero;
1035 624566 : bool isLocalBuf = BufferIsLocal(buffer);
1036 :
1037 : Assert(mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK);
1038 :
1039 624566 : if (already_valid)
1040 : {
1041 : /*
1042 : * If the caller already knew the buffer was valid, we can skip some
1043 : * header interaction. The caller just wants to lock the buffer.
1044 : */
1045 73920 : need_to_zero = false;
1046 : }
1047 550646 : else if (isLocalBuf)
1048 : {
1049 : /* Simple case for non-shared buffers. */
1050 48 : bufHdr = GetLocalBufferDescriptor(-buffer - 1);
1051 48 : need_to_zero = StartLocalBufferIO(bufHdr, true, false);
1052 : }
1053 : else
1054 : {
1055 : /*
1056 : * Take BM_IO_IN_PROGRESS, or discover that BM_VALID has been set
1057 : * concurrently. Even though we aren't doing I/O, that ensures that
1058 : * we don't zero a page that someone else has pinned. An exclusive
1059 : * content lock wouldn't be enough, because readers are allowed to
1060 : * drop the content lock after determining that a tuple is visible
1061 : * (see buffer access rules in README).
1062 : */
1063 550598 : bufHdr = GetBufferDescriptor(buffer - 1);
1064 550598 : need_to_zero = StartBufferIO(bufHdr, true, false);
1065 : }
1066 :
1067 624566 : if (need_to_zero)
1068 : {
1069 550646 : memset(BufferGetPage(buffer), 0, BLCKSZ);
1070 :
1071 : /*
1072 : * Grab the buffer content lock before marking the page as valid, to
1073 : * make sure that no other backend sees the zeroed page before the
1074 : * caller has had a chance to initialize it.
1075 : *
1076 : * Since no-one else can be looking at the page contents yet, there is
1077 : * no difference between an exclusive lock and a cleanup-strength
1078 : * lock. (Note that we cannot use LockBuffer() or
1079 : * LockBufferForCleanup() here, because they assert that the buffer is
1080 : * already valid.)
1081 : */
1082 550646 : if (!isLocalBuf)
1083 550598 : LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_EXCLUSIVE);
1084 :
1085 : /* Set BM_VALID, terminate IO, and wake up any waiters */
1086 550646 : if (isLocalBuf)
1087 48 : TerminateLocalBufferIO(bufHdr, false, BM_VALID, false);
1088 : else
1089 550598 : TerminateBufferIO(bufHdr, false, BM_VALID, true, false);
1090 : }
1091 73920 : else if (!isLocalBuf)
1092 : {
1093 : /*
1094 : * The buffer is valid, so we can't zero it. The caller still expects
1095 : * the page to be locked on return.
1096 : */
1097 73880 : if (mode == RBM_ZERO_AND_LOCK)
1098 73782 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
1099 : else
1100 98 : LockBufferForCleanup(buffer);
1101 : }
1102 624566 : }
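/*
 * Illustrative caller-side sketch (not part of bufmgr.c, helper name is
 * hypothetical): RBM_ZERO_AND_LOCK as described in ReadBufferExtended()'s
 * comment, for a caller that will overwrite the whole page anyway.  Per the
 * caution above, blkno must not be beyond the relation's current physical
 * EOF.  Assumes "storage/bufmgr.h" and "utils/rel.h".
 */
static Buffer
get_page_for_overwrite(Relation rel, BlockNumber blkno)
{
	Buffer		buf;

	/*
	 * Returned pinned and exclusively locked; the contents are zeroed unless
	 * the page was already cached, in which case it is merely locked.
	 */
	buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK,
							 NULL);

	/* caller reinitializes the page, marks it dirty, WAL-logs it, etc. */
	return buf;
}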
1103 :
1104 : /*
1105 : * Pin a buffer for a given block. *foundPtr is set to true if the block was
1106 : * already present, or false if more work is required to either read it in or
1107 : * zero it.
1108 : */
1109 : static pg_attribute_always_inline Buffer
1110 121930572 : PinBufferForBlock(Relation rel,
1111 : SMgrRelation smgr,
1112 : char persistence,
1113 : ForkNumber forkNum,
1114 : BlockNumber blockNum,
1115 : BufferAccessStrategy strategy,
1116 : bool *foundPtr)
1117 : {
1118 : BufferDesc *bufHdr;
1119 : IOContext io_context;
1120 : IOObject io_object;
1121 :
1122 : Assert(blockNum != P_NEW);
1123 :
1124 : /* Persistence should be set before */
1125 : Assert((persistence == RELPERSISTENCE_TEMP ||
1126 : persistence == RELPERSISTENCE_PERMANENT ||
1127 : persistence == RELPERSISTENCE_UNLOGGED));
1128 :
1129 121930572 : if (persistence == RELPERSISTENCE_TEMP)
1130 : {
1131 2542086 : io_context = IOCONTEXT_NORMAL;
1132 2542086 : io_object = IOOBJECT_TEMP_RELATION;
1133 : }
1134 : else
1135 : {
1136 119388486 : io_context = IOContextForStrategy(strategy);
1137 119388486 : io_object = IOOBJECT_RELATION;
1138 : }
1139 :
1140 : TRACE_POSTGRESQL_BUFFER_READ_START(forkNum, blockNum,
1141 : smgr->smgr_rlocator.locator.spcOid,
1142 : smgr->smgr_rlocator.locator.dbOid,
1143 : smgr->smgr_rlocator.locator.relNumber,
1144 : smgr->smgr_rlocator.backend);
1145 :
1146 121930572 : if (persistence == RELPERSISTENCE_TEMP)
1147 : {
1148 2542086 : bufHdr = LocalBufferAlloc(smgr, forkNum, blockNum, foundPtr);
1149 2542074 : if (*foundPtr)
1150 2525316 : pgBufferUsage.local_blks_hit++;
1151 : }
1152 : else
1153 : {
1154 119388486 : bufHdr = BufferAlloc(smgr, persistence, forkNum, blockNum,
1155 : strategy, foundPtr, io_context);
1156 119388486 : if (*foundPtr)
1157 116096196 : pgBufferUsage.shared_blks_hit++;
1158 : }
1159 121930560 : if (rel)
1160 : {
1161 : /*
1162 : * While pgBufferUsage's "read" counter isn't bumped unless we reach
1163 : * WaitReadBuffers() (so, not for hits, and not for buffers that are
1164 : * zeroed instead), the per-relation stats always count them.
1165 : */
1166 110144136 : pgstat_count_buffer_read(rel);
1167 110144136 : if (*foundPtr)
1168 107737150 : pgstat_count_buffer_hit(rel);
1169 : }
1170 121930560 : if (*foundPtr)
1171 : {
1172 118621512 : pgstat_count_io_op(io_object, io_context, IOOP_HIT, 1, 0);
1173 118621512 : if (VacuumCostActive)
1174 3440240 : VacuumCostBalance += VacuumCostPageHit;
1175 :
1176 : TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
1177 : smgr->smgr_rlocator.locator.spcOid,
1178 : smgr->smgr_rlocator.locator.dbOid,
1179 : smgr->smgr_rlocator.locator.relNumber,
1180 : smgr->smgr_rlocator.backend,
1181 : true);
1182 : }
1183 :
1184 121930560 : return BufferDescriptorGetBuffer(bufHdr);
1185 : }
1186 :
1187 : /*
1188 : * ReadBuffer_common -- common logic for all ReadBuffer variants
1189 : *
1190 : * smgr is required, rel is optional unless using P_NEW.
1191 : */
1192 : static pg_attribute_always_inline Buffer
1193 114310330 : ReadBuffer_common(Relation rel, SMgrRelation smgr, char smgr_persistence,
1194 : ForkNumber forkNum,
1195 : BlockNumber blockNum, ReadBufferMode mode,
1196 : BufferAccessStrategy strategy)
1197 : {
1198 : ReadBuffersOperation operation;
1199 : Buffer buffer;
1200 : int flags;
1201 : char persistence;
1202 :
1203 : /*
1204 : * Backward compatibility path, most code should use ExtendBufferedRel()
1205 : * instead, as acquiring the extension lock inside ExtendBufferedRel()
1206 : * scales a lot better.
1207 : */
1208 114310330 : if (unlikely(blockNum == P_NEW))
1209 : {
1210 574 : uint32 flags = EB_SKIP_EXTENSION_LOCK;
1211 :
1212 : /*
1213 : * Since no-one else can be looking at the page contents yet, there is
1214 : * no difference between an exclusive lock and a cleanup-strength
1215 : * lock.
1216 : */
1217 574 : if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
1218 0 : flags |= EB_LOCK_FIRST;
1219 :
1220 574 : return ExtendBufferedRel(BMR_REL(rel), forkNum, strategy, flags);
1221 : }
1222 :
1223 114309756 : if (rel)
1224 102983192 : persistence = rel->rd_rel->relpersistence;
1225 : else
1226 11326564 : persistence = smgr_persistence;
1227 :
1228 114309756 : if (unlikely(mode == RBM_ZERO_AND_CLEANUP_LOCK ||
1229 : mode == RBM_ZERO_AND_LOCK))
1230 : {
1231 : bool found;
1232 :
1233 624566 : buffer = PinBufferForBlock(rel, smgr, persistence,
1234 : forkNum, blockNum, strategy, &found);
1235 624566 : ZeroAndLockBuffer(buffer, mode, found);
1236 624566 : return buffer;
1237 : }
1238 :
1239 : /*
1240 : * Signal that we are going to immediately wait. If we're immediately
1241 : * waiting, there is no benefit in actually executing the IO
1242 : * asynchronously; it would just add dispatch overhead.
1243 : */
1244 113685190 : flags = READ_BUFFERS_SYNCHRONOUSLY;
1245 113685190 : if (mode == RBM_ZERO_ON_ERROR)
1246 2331882 : flags |= READ_BUFFERS_ZERO_ON_ERROR;
1247 113685190 : operation.smgr = smgr;
1248 113685190 : operation.rel = rel;
1249 113685190 : operation.persistence = persistence;
1250 113685190 : operation.forknum = forkNum;
1251 113685190 : operation.strategy = strategy;
1252 113685190 : if (StartReadBuffer(&operation,
1253 : &buffer,
1254 : blockNum,
1255 : flags))
1256 1348934 : WaitReadBuffers(&operation);
1257 :
1258 113685144 : return buffer;
1259 : }
1260 :
1261 : static pg_attribute_always_inline bool
1262 120984182 : StartReadBuffersImpl(ReadBuffersOperation *operation,
1263 : Buffer *buffers,
1264 : BlockNumber blockNum,
1265 : int *nblocks,
1266 : int flags,
1267 : bool allow_forwarding)
1268 : {
1269 120984182 : int actual_nblocks = *nblocks;
1270 120984182 : int maxcombine = 0;
1271 : bool did_start_io;
1272 :
1273 : Assert(*nblocks == 1 || allow_forwarding);
1274 : Assert(*nblocks > 0);
1275 : Assert(*nblocks <= MAX_IO_COMBINE_LIMIT);
1276 :
1277 123742584 : for (int i = 0; i < actual_nblocks; ++i)
1278 : {
1279 : bool found;
1280 :
1281 121308818 : if (allow_forwarding && buffers[i] != InvalidBuffer)
1282 2812 : {
1283 : BufferDesc *bufHdr;
1284 :
1285 : /*
1286 : * This is a buffer that was pinned by an earlier call to
1287 : * StartReadBuffers(), but couldn't be handled in one operation at
1288 : * that time. The operation was split, and the caller has passed
1289 : * an already pinned buffer back to us to handle the rest of the
1290 : * operation. It must continue at the expected block number.
1291 : */
1292 : Assert(BufferGetBlockNumber(buffers[i]) == blockNum + i);
1293 :
1294 : /*
1295 : * It might be an already valid buffer (a hit) that followed the
1296 : * final contiguous block of an earlier I/O (a miss) marking the
1297 : * end of it, or a buffer that some other backend has since made
1298 : * valid by performing the I/O for us, in which case we can handle
1299 : * it as a hit now. It is safe to check for a BM_VALID flag with
1300 : * a relaxed load, because we got a fresh view of it while pinning
1301 : * it in the previous call.
1302 : *
1303 : * On the other hand if we don't see BM_VALID yet, it must be an
1304 : * I/O that was split by the previous call and we need to try to
1305 : * start a new I/O from this block. We're also racing against any
1306 : * other backend that might start the I/O or even manage to mark
1307 : * it BM_VALID after this check, but StartBufferIO() will handle
1308 : * those cases.
1309 : */
1310 2812 : if (BufferIsLocal(buffers[i]))
1311 4 : bufHdr = GetLocalBufferDescriptor(-buffers[i] - 1);
1312 : else
1313 2808 : bufHdr = GetBufferDescriptor(buffers[i] - 1);
1314 : Assert(pg_atomic_read_u32(&bufHdr->state) & BM_TAG_VALID);
1315 2812 : found = pg_atomic_read_u32(&bufHdr->state) & BM_VALID;
1316 : }
1317 : else
1318 : {
1319 121305994 : buffers[i] = PinBufferForBlock(operation->rel,
1320 121306006 : operation->smgr,
1321 121306006 : operation->persistence,
1322 : operation->forknum,
1323 : blockNum + i,
1324 : operation->strategy,
1325 : &found);
1326 : }
1327 :
1328 121308806 : if (found)
1329 : {
1330 : /*
1331 : * We have a hit. If it's the first block in the requested range,
1332 : * we can return it immediately and report that WaitReadBuffers()
1333 : * does not need to be called. If the initial value of *nblocks
1334 : * was larger, the caller will have to call again for the rest.
1335 : */
1336 118550404 : if (i == 0)
1337 : {
1338 118547588 : *nblocks = 1;
1339 :
1340 : #ifdef USE_ASSERT_CHECKING
1341 :
1342 : /*
1343 : * Initialize enough of ReadBuffersOperation to make
1344 : * CheckReadBuffersOperation() work. Outside of assertions
1345 : * that's not necessary when no IO is issued.
1346 : */
1347 : operation->buffers = buffers;
1348 : operation->blocknum = blockNum;
1349 : operation->nblocks = 1;
1350 : operation->nblocks_done = 1;
1351 : CheckReadBuffersOperation(operation, true);
1352 : #endif
1353 118547588 : return false;
1354 : }
1355 :
1356 : /*
1357 : * Otherwise we already have an I/O to perform, but this block
1358 : * can't be included as it is already valid. Split the I/O here.
1359 : * There may or may not be more blocks requiring I/O after this
1360 : * one, we haven't checked, but they can't be contiguous with this
1361 : * one in the way. We'll leave this buffer pinned, forwarding it
1362 : * to the next call, avoiding the need to unpin it here and re-pin
1363 : * it in the next call.
1364 : */
1365 2816 : actual_nblocks = i;
1366 2816 : break;
1367 : }
1368 : else
1369 : {
1370 : /*
1371 : * Check how many blocks we can cover with the same IO. The smgr
1372 : * implementation might e.g. be limited due to a segment boundary.
1373 : */
1374 2758402 : if (i == 0 && actual_nblocks > 1)
1375 : {
1376 66274 : maxcombine = smgrmaxcombine(operation->smgr,
1377 : operation->forknum,
1378 : blockNum);
1379 66274 : if (unlikely(maxcombine < actual_nblocks))
1380 : {
1381 0 : elog(DEBUG2, "limiting nblocks at %u from %u to %u",
1382 : blockNum, actual_nblocks, maxcombine);
1383 0 : actual_nblocks = maxcombine;
1384 : }
1385 : }
1386 : }
1387 : }
1388 2436582 : *nblocks = actual_nblocks;
1389 :
1390 : /* Populate information needed for I/O. */
1391 2436582 : operation->buffers = buffers;
1392 2436582 : operation->blocknum = blockNum;
1393 2436582 : operation->flags = flags;
1394 2436582 : operation->nblocks = actual_nblocks;
1395 2436582 : operation->nblocks_done = 0;
1396 2436582 : pgaio_wref_clear(&operation->io_wref);
1397 :
1398 : /*
1399 : * When using AIO, start the IO in the background. If not, issue prefetch
1400 : * requests if desired by the caller.
1401 : *
1402 : * The reason we have a dedicated path for IOMETHOD_SYNC here is to
1403 : * de-risk the introduction of AIO somewhat. It's a large architectural
1404 : * change, with lots of chances for unanticipated performance effects.
1405 : *
1406 : * Use of IOMETHOD_SYNC already leads to not actually performing IO
1407 : * asynchronously, but without the check here we'd execute IO earlier than
1408 : * we used to. Eventually this IOMETHOD_SYNC specific path should go away.
1409 : */
1410 2436582 : if (io_method != IOMETHOD_SYNC)
1411 : {
1412 : /*
1413 : * Try to start IO asynchronously. It's possible that no IO needs to
1414 : * be started, if another backend already performed the IO.
1415 : *
1416 : * Note that if an IO is started, it might not cover the entire
1417 : * requested range, e.g. because an intermediary block has been read
1418 : * in by another backend. In that case any "trailing" buffers we
1419 : * already pinned above will be "forwarded" by read_stream.c to the
1420 : * next call to StartReadBuffers().
1421 : *
1422 : * This is signalled to the caller by decrementing *nblocks *and*
1423 : * reducing operation->nblocks. The latter is done here, but not below
1424 : * WaitReadBuffers(), as in WaitReadBuffers() we can't "shorten" the
1425 : * overall read size anymore, we need to retry until done in its
1426 : * entirety or until failed.
1427 : */
1428 2434408 : did_start_io = AsyncReadBuffers(operation, nblocks);
1429 :
1430 2434378 : operation->nblocks = *nblocks;
1431 : }
1432 : else
1433 : {
1434 2174 : operation->flags |= READ_BUFFERS_SYNCHRONOUSLY;
1435 :
1436 2174 : if (flags & READ_BUFFERS_ISSUE_ADVICE)
1437 : {
1438 : /*
1439 : * In theory we should only do this if PinBufferForBlock() had to
1440 : * allocate new buffers above. That way, if two calls to
1441 : * StartReadBuffers() were made for the same blocks before
1442 : * WaitReadBuffers(), only the first would issue the advice.
1443 : * That'd be a better simulation of true asynchronous I/O, which
1444 : * would only start the I/O once, but isn't done here for
1445 : * simplicity.
1446 : */
1447 4 : smgrprefetch(operation->smgr,
1448 : operation->forknum,
1449 : blockNum,
1450 : actual_nblocks);
1451 : }
1452 :
1453 : /*
1454 : * Indicate that WaitReadBuffers() should be called. WaitReadBuffers()
1455 : * will initiate the necessary IO.
1456 : */
1457 2174 : did_start_io = true;
1458 : }
1459 :
1460 2436552 : CheckReadBuffersOperation(operation, !did_start_io);
1461 :
1462 2436552 : return did_start_io;
1463 : }
1464 :
1465 : /*
1466 : * Begin reading a range of blocks beginning at blockNum and extending for
1467 : * *nblocks. *nblocks and the buffers array are in/out parameters. On entry,
1468 : * the buffers elements covered by *nblocks must hold either InvalidBuffer or
1469 : * buffers forwarded by an earlier call to StartReadBuffers() that was split
1470 : * and is now being continued. On return, *nblocks holds the number of blocks
1471 : * accepted by this operation. If it is less than the original number then
1472 : * this operation has been split, but buffer elements up to the original
1473 : * requested size may hold forwarded buffers to be used for a continuing
1474 : * operation. The caller must either start a new I/O beginning at the block
1475 : * immediately following the blocks accepted by this call and pass those
1476 : * buffers back in, or release them if it chooses not to. It shouldn't make
1477 : * any other use of or assumptions about forwarded buffers.
1478 : *
1479 : * If false is returned, no I/O is necessary and the buffers covered by
1480 : * *nblocks on exit are valid and ready to be accessed. If true is returned,
1481 : * an I/O has been started, and WaitReadBuffers() must be called with the same
1482 : * operation object before the buffers covered by *nblocks on exit can be
1483 : * accessed. Along with the operation object, the caller-supplied array of
1484 : * buffers must remain valid until WaitReadBuffers() is called, and any
1485 : * forwarded buffers must also be preserved for a continuing call unless
1486 : * they are explicitly released.
1487 : *
1488 : * Currently the I/O is only started with optional operating system advice if
1489 : * requested by the caller with READ_BUFFERS_ISSUE_ADVICE, and the real I/O
1490 : * happens synchronously in WaitReadBuffers(). In future work, true I/O could
1491 : * be initiated here.
1492 : */
1493 : bool
1494 3384780 : StartReadBuffers(ReadBuffersOperation *operation,
1495 : Buffer *buffers,
1496 : BlockNumber blockNum,
1497 : int *nblocks,
1498 : int flags)
1499 : {
1500 3384780 : return StartReadBuffersImpl(operation, buffers, blockNum, nblocks, flags,
1501 : true /* expect forwarded buffers */ );
1502 : }
1503 :
1504 : /*
1505 : * Single block version of StartReadBuffers(). This might save a few
1506 : * instructions when called from another translation unit, because it is
1507 : * specialized for nblocks == 1.
1508 : *
1509 : * This version does not support "forwarded" buffers: they cannot be created
1510 : * by reading only one block, and *buffer is ignored on entry.
1511 : */
1512 : bool
1513 117599402 : StartReadBuffer(ReadBuffersOperation *operation,
1514 : Buffer *buffer,
1515 : BlockNumber blocknum,
1516 : int flags)
1517 : {
1518 117599402 : int nblocks = 1;
1519 : bool result;
1520 :
1521 117599402 : result = StartReadBuffersImpl(operation, buffer, blocknum, &nblocks, flags,
1522 : false /* single block, no forwarding */ );
1523 : Assert(nblocks == 1); /* single block can't be short */
1524 :
1525 117599372 : return result;
1526 : }
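/*
 * Illustrative caller-side sketch (not part of bufmgr.c, helper name is
 * hypothetical): the two-step StartReadBuffer()/WaitReadBuffers() pattern
 * for a single block, essentially what ReadBuffer_common() above does for
 * plain RBM_NORMAL reads.  Assumes "storage/bufmgr.h" and "utils/rel.h".
 */
static Buffer
two_step_read(Relation rel, BlockNumber blkno)
{
	ReadBuffersOperation op;
	Buffer		buf;

	op.smgr = RelationGetSmgr(rel);
	op.rel = rel;
	op.persistence = rel->rd_rel->relpersistence;
	op.forknum = MAIN_FORKNUM;
	op.strategy = NULL;

	/* returns false on a hit, in which case no wait step is needed */
	if (StartReadBuffer(&op, &buf, blkno, READ_BUFFERS_SYNCHRONOUSLY))
		WaitReadBuffers(&op);

	return buf;					/* pinned, contents valid */
}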
1527 :
1528 : /*
1529 : * Perform sanity checks on the ReadBuffersOperation.
1530 : */
1531 : static void
1532 7282972 : CheckReadBuffersOperation(ReadBuffersOperation *operation, bool is_complete)
1533 : {
1534 : #ifdef USE_ASSERT_CHECKING
1535 : Assert(operation->nblocks_done <= operation->nblocks);
1536 : Assert(!is_complete || operation->nblocks == operation->nblocks_done);
1537 :
1538 : for (int i = 0; i < operation->nblocks; i++)
1539 : {
1540 : Buffer buffer = operation->buffers[i];
1541 : BufferDesc *buf_hdr = BufferIsLocal(buffer) ?
1542 : GetLocalBufferDescriptor(-buffer - 1) :
1543 : GetBufferDescriptor(buffer - 1);
1544 :
1545 : Assert(BufferGetBlockNumber(buffer) == operation->blocknum + i);
1546 : Assert(pg_atomic_read_u32(&buf_hdr->state) & BM_TAG_VALID);
1547 :
1548 : if (i < operation->nblocks_done)
1549 : Assert(pg_atomic_read_u32(&buf_hdr->state) & BM_VALID);
1550 : }
1551 : #endif
1552 7282972 : }
1553 :
1554 : /* helper for ReadBuffersCanStartIO(), to avoid repetition */
1555 : static inline bool
1556 2758438 : ReadBuffersCanStartIOOnce(Buffer buffer, bool nowait)
1557 : {
1558 2758438 : if (BufferIsLocal(buffer))
1559 16710 : return StartLocalBufferIO(GetLocalBufferDescriptor(-buffer - 1),
1560 : true, nowait);
1561 : else
1562 2741728 : return StartBufferIO(GetBufferDescriptor(buffer - 1), true, nowait);
1563 : }
1564 :
1565 : /*
1566 : * Helper for AsyncReadBuffers that tries to get the buffer ready for IO.
1567 : */
1568 : static inline bool
1569 2758438 : ReadBuffersCanStartIO(Buffer buffer, bool nowait)
1570 : {
1571 : /*
1572 : * If this backend currently has staged IO, we need to submit the pending
1573 : * IO before waiting for the right to issue IO, to avoid the potential for
1574 : * deadlocks (and, more commonly, unnecessary delays for other backends).
1575 : */
1576 2758438 : if (!nowait && pgaio_have_staged())
1577 : {
1578 1112 : if (ReadBuffersCanStartIOOnce(buffer, true))
1579 1112 : return true;
1580 :
1581 : /*
1582              :  * Unfortunately StartBufferIO() returning false doesn't allow us to
1583 : * distinguish between the buffer already being valid and IO already
1584 : * being in progress. Since IO already being in progress is quite
1585 : * rare, this approach seems fine.
1586 : */
1587 0 : pgaio_submit_staged();
1588 : }
1589 :
1590 2757326 : return ReadBuffersCanStartIOOnce(buffer, nowait);
1591 : }
1592 :
1593 : /*
1594 : * Helper for WaitReadBuffers() that processes the results of a readv
1595 : * operation, raising an error if necessary.
1596 : */
1597 : static void
1598 2422162 : ProcessReadBuffersResult(ReadBuffersOperation *operation)
1599 : {
1600 2422162 : PgAioReturn *aio_ret = &operation->io_return;
1601 2422162 : PgAioResultStatus rs = aio_ret->result.status;
1602 2422162 : int newly_read_blocks = 0;
1603 :
1604 : Assert(pgaio_wref_valid(&operation->io_wref));
1605 : Assert(aio_ret->result.status != PGAIO_RS_UNKNOWN);
1606 :
1607 : /*
1608 : * SMGR reports the number of blocks successfully read as the result of
1609 : * the IO operation. Thus we can simply add that to ->nblocks_done.
1610 : */
1611 :
1612 2422162 : if (likely(rs != PGAIO_RS_ERROR))
1613 2422104 : newly_read_blocks = aio_ret->result.result;
1614 :
1615 2422162 : if (rs == PGAIO_RS_ERROR || rs == PGAIO_RS_WARNING)
1616 90 : pgaio_result_report(aio_ret->result, &aio_ret->target_data,
1617 : rs == PGAIO_RS_ERROR ? ERROR : WARNING);
1618 2422072 : else if (aio_ret->result.status == PGAIO_RS_PARTIAL)
1619 : {
1620 : /*
1621 : * We'll retry, so we just emit a debug message to the server log (or
1622 : * not even that in prod scenarios).
1623              :  * not even that in production scenarios).
1624 20 : pgaio_result_report(aio_ret->result, &aio_ret->target_data, DEBUG1);
1625 20 : elog(DEBUG3, "partial read, will retry");
1626 : }
1627 :
1628 : Assert(newly_read_blocks > 0);
1629 : Assert(newly_read_blocks <= MAX_IO_COMBINE_LIMIT);
1630 :
1631 2422104 : operation->nblocks_done += newly_read_blocks;
1632 :
1633 : Assert(operation->nblocks_done <= operation->nblocks);
1634 2422104 : }
1635 :
1636 : void
1637 2422142 : WaitReadBuffers(ReadBuffersOperation *operation)
1638 : {
1639 2422142 : PgAioReturn *aio_ret = &operation->io_return;
1640 : IOContext io_context;
1641 : IOObject io_object;
1642 :
1643 2422142 : if (operation->persistence == RELPERSISTENCE_TEMP)
1644 : {
1645 2956 : io_context = IOCONTEXT_NORMAL;
1646 2956 : io_object = IOOBJECT_TEMP_RELATION;
1647 : }
1648 : else
1649 : {
1650 2419186 : io_context = IOContextForStrategy(operation->strategy);
1651 2419186 : io_object = IOOBJECT_RELATION;
1652 : }
1653 :
1654 : /*
1655 : * If we get here without an IO operation having been issued, the
1656 : * io_method == IOMETHOD_SYNC path must have been used. Otherwise the
1657 : * caller should not have called WaitReadBuffers().
1658 : *
1659              :  * In the case of IOMETHOD_SYNC, we start the IO in WaitReadBuffers(),
1660              :  * as we used to before the introduction of AIO. This is done as part
1661              :  * of the retry logic below; no extra code is required.
1662 : *
1663 : * This path is expected to eventually go away.
1664 : */
1665 2422142 : if (!pgaio_wref_valid(&operation->io_wref) && io_method != IOMETHOD_SYNC)
1666 0 : elog(ERROR, "waiting for read operation that didn't read");
1667 :
1668 : /*
1669 : * To handle partial reads, and IOMETHOD_SYNC, we re-issue IO until we're
1670 : * done. We may need multiple retries, not just because we could get
1671 : * multiple partial reads, but also because some of the remaining
1672 : * to-be-read buffers may have been read in by other backends, limiting
1673 : * the IO size.
1674 : */
1675 : while (true)
1676 2194 : {
1677 : int ignored_nblocks_progress;
1678 :
1679 2424336 : CheckReadBuffersOperation(operation, false);
1680 :
1681 : /*
1682 : * If there is an IO associated with the operation, we may need to
1683 : * wait for it.
1684 : */
1685 2424336 : if (pgaio_wref_valid(&operation->io_wref))
1686 : {
1687 : /*
1688 : * Track the time spent waiting for the IO to complete. As
1689 : * tracking a wait even if we don't actually need to wait
1690 : *
1691 : * a) is not cheap, due to the timestamping overhead
1692 : *
1693 : * b) reports some time as waiting, even if we never waited
1694 : *
1695 : * we first check if we already know the IO is complete.
1696 : */
1697 2422162 : if (aio_ret->result.status == PGAIO_RS_UNKNOWN &&
1698 1058856 : !pgaio_wref_check_done(&operation->io_wref))
1699 : {
1700 350730 : instr_time io_start = pgstat_prepare_io_time(track_io_timing);
1701 :
1702 350730 : pgaio_wref_wait(&operation->io_wref);
1703 :
1704 : /*
1705 : * The IO operation itself was already counted earlier, in
1706 : * AsyncReadBuffers(), this just accounts for the wait time.
1707 : */
1708 350730 : pgstat_count_io_op_time(io_object, io_context, IOOP_READ,
1709 : io_start, 0, 0);
1710 : }
1711 : else
1712 : {
1713 : Assert(pgaio_wref_check_done(&operation->io_wref));
1714 : }
1715 :
1716 : /*
1717 : * We now are sure the IO completed. Check the results. This
1718 : * includes reporting on errors if there were any.
1719 : */
1720 2422162 : ProcessReadBuffersResult(operation);
1721 : }
1722 :
1723 : /*
1724              :  * Most of the time, the one IO we already started will read in
1725 : * everything. But we need to deal with partial reads and buffers not
1726 : * needing IO anymore.
1727 : */
1728 2424278 : if (operation->nblocks_done == operation->nblocks)
1729 2422084 : break;
1730 :
1731 2194 : CHECK_FOR_INTERRUPTS();
1732 :
1733 : /*
1734 : * This may only complete the IO partially, either because some
1735 : * buffers were already valid, or because of a partial read.
1736 : *
1737 : * NB: In contrast to after the AsyncReadBuffers() call in
1738 : * StartReadBuffers(), we do *not* reduce
1739 : * ReadBuffersOperation->nblocks here, callers expect the full
1740 : * operation to be completed at this point (as more operations may
1741 : * have been queued).
1742 : */
1743 2194 : AsyncReadBuffers(operation, &ignored_nblocks_progress);
1744 : }
1745 :
1746 2422084 : CheckReadBuffersOperation(operation, true);
1747 :
1748 : /* NB: READ_DONE tracepoint was already executed in completion callback */
1749 2422084 : }
1750 :
1751 : /*
1752 : * Initiate IO for the ReadBuffersOperation
1753 : *
1754 : * This function only starts a single IO at a time. The size of the IO may be
1755              :  * This function only starts a single IO at a time. The IO may cover fewer
1756              :  * than the to-be-read blocks if one of the buffers has concurrently been
1757              :  * read in. If the first to-be-read buffer is already valid,
1758 : *
1759 : * To support retries after partial reads, the first operation->nblocks_done
1760 : * buffers are skipped.
1761 : *
1762 : * On return *nblocks_progress is updated to reflect the number of buffers
1763 : * affected by the call. If the first buffer is valid, *nblocks_progress is
1764 : * set to 1 and operation->nblocks_done is incremented.
1765 : *
1766 : * Returns true if IO was initiated, false if no IO was necessary.
1767 : */
1768 : static bool
1769 2436602 : AsyncReadBuffers(ReadBuffersOperation *operation, int *nblocks_progress)
1770 : {
1771 2436602 : Buffer *buffers = &operation->buffers[0];
1772 2436602 : int flags = operation->flags;
1773 2436602 : BlockNumber blocknum = operation->blocknum;
1774 2436602 : ForkNumber forknum = operation->forknum;
1775 2436602 : char persistence = operation->persistence;
1776 2436602 : int16 nblocks_done = operation->nblocks_done;
1777 2436602 : Buffer *io_buffers = &operation->buffers[nblocks_done];
1778 2436602 : int io_buffers_len = 0;
1779 : PgAioHandle *ioh;
1780 2436602 : uint32 ioh_flags = 0;
1781 : void *io_pages[MAX_IO_COMBINE_LIMIT];
1782 : IOContext io_context;
1783 : IOObject io_object;
1784 : bool did_start_io;
1785 :
1786 : /*
1787 : * When this IO is executed synchronously, either because the caller will
1788 : * immediately block waiting for the IO or because IOMETHOD_SYNC is used,
1789 : * the AIO subsystem needs to know.
1790 : */
1791 2436602 : if (flags & READ_BUFFERS_SYNCHRONOUSLY)
1792 1360070 : ioh_flags |= PGAIO_HF_SYNCHRONOUS;
1793 :
1794 2436602 : if (persistence == RELPERSISTENCE_TEMP)
1795 : {
1796 3544 : io_context = IOCONTEXT_NORMAL;
1797 3544 : io_object = IOOBJECT_TEMP_RELATION;
1798 3544 : ioh_flags |= PGAIO_HF_REFERENCES_LOCAL;
1799 : }
1800 : else
1801 : {
1802 2433058 : io_context = IOContextForStrategy(operation->strategy);
1803 2433058 : io_object = IOOBJECT_RELATION;
1804 : }
1805 :
1806 : /*
1807 : * If zero_damaged_pages is enabled, add the READ_BUFFERS_ZERO_ON_ERROR
1808 : * flag. The reason for that is that, hopefully, zero_damaged_pages isn't
1809 : * set globally, but on a per-session basis. The completion callback,
1810 : * which may be run in other processes, e.g. in IO workers, may have a
1811 : * different value of the zero_damaged_pages GUC.
1812 : *
1813 : * XXX: We probably should eventually use a different flag for
1814 : * zero_damaged_pages, so we can report different log levels / error codes
1815 : * for zero_damaged_pages and ZERO_ON_ERROR.
1816 : */
1817 2436602 : if (zero_damaged_pages)
1818 32 : flags |= READ_BUFFERS_ZERO_ON_ERROR;
1819 :
1820 : /*
1821 : * For the same reason as with zero_damaged_pages we need to use this
1822 : * backend's ignore_checksum_failure value.
1823 : */
1824 2436602 : if (ignore_checksum_failure)
1825 16 : flags |= READ_BUFFERS_IGNORE_CHECKSUM_FAILURES;
1826 :
1827 :
1828 : /*
1829 : * To be allowed to report stats in the local completion callback we need
1830 : * to prepare to report stats now. This ensures we can safely report the
1831 : * checksum failure even in a critical section.
1832 : */
1833 2436602 : pgstat_prepare_report_checksum_failure(operation->smgr->smgr_rlocator.locator.dbOid);
1834 :
1835 : /*
1836 : * Get IO handle before ReadBuffersCanStartIO(), as pgaio_io_acquire()
1837 : * might block, which we don't want after setting IO_IN_PROGRESS.
1838 : *
1839 : * If we need to wait for IO before we can get a handle, submit
1840 : * already-staged IO first, so that other backends don't need to wait.
1841 : * There wouldn't be a deadlock risk, as pgaio_io_acquire() just needs to
1842 : * wait for already submitted IO, which doesn't require additional locks,
1843 : * but it could still cause undesirable waits.
1844 : *
1845 : * A secondary benefit is that this would allow us to measure the time in
1846 : * pgaio_io_acquire() without causing undue timer overhead in the common,
1847 : * non-blocking, case. However, currently the pgstats infrastructure
1848 : * doesn't really allow that, as it a) asserts that an operation can't
1849              :  * have time without operations and b) doesn't have an API to report
1850 : * "accumulated" time.
1851 : */
1852 2436602 : ioh = pgaio_io_acquire_nb(CurrentResourceOwner, &operation->io_return);
1853 2436602 : if (unlikely(!ioh))
1854 : {
1855 6030 : pgaio_submit_staged();
1856 :
1857 6030 : ioh = pgaio_io_acquire(CurrentResourceOwner, &operation->io_return);
1858 : }
1859 :
1860 : /*
1861 : * Check if we can start IO on the first to-be-read buffer.
1862 : *
1863 : * If an I/O is already in progress in another backend, we want to wait
1864 : * for the outcome: either done, or something went wrong and we will
1865 : * retry.
1866 : */
1867 2436602 : if (!ReadBuffersCanStartIO(buffers[nblocks_done], false))
1868 : {
1869 : /*
1870 : * Someone else has already completed this block, we're done.
1871 : *
1872 : * When IO is necessary, ->nblocks_done is updated in
1873 : * ProcessReadBuffersResult(), but that is not called if no IO is
1874 : * necessary. Thus update here.
1875 : */
1876 13812 : operation->nblocks_done += 1;
1877 13812 : *nblocks_progress = 1;
1878 :
1879 13812 : pgaio_io_release(ioh);
1880 13812 : pgaio_wref_clear(&operation->io_wref);
1881 13812 : did_start_io = false;
1882 :
1883 : /*
1884 : * Report and track this as a 'hit' for this backend, even though it
1885 : * must have started out as a miss in PinBufferForBlock(). The other
1886 : * backend will track this as a 'read'.
1887 : */
1888 : TRACE_POSTGRESQL_BUFFER_READ_DONE(forknum, blocknum + operation->nblocks_done,
1889 : operation->smgr->smgr_rlocator.locator.spcOid,
1890 : operation->smgr->smgr_rlocator.locator.dbOid,
1891 : operation->smgr->smgr_rlocator.locator.relNumber,
1892 : operation->smgr->smgr_rlocator.backend,
1893 : true);
1894 :
1895 13812 : if (persistence == RELPERSISTENCE_TEMP)
1896 0 : pgBufferUsage.local_blks_hit += 1;
1897 : else
1898 13812 : pgBufferUsage.shared_blks_hit += 1;
1899 :
1900 13812 : if (operation->rel)
1901 13812 : pgstat_count_buffer_hit(operation->rel);
1902 :
1903 13812 : pgstat_count_io_op(io_object, io_context, IOOP_HIT, 1, 0);
1904 :
1905 13812 : if (VacuumCostActive)
1906 40 : VacuumCostBalance += VacuumCostPageHit;
1907 : }
1908 : else
1909 : {
1910 : instr_time io_start;
1911 :
1912 : /* We found a buffer that we need to read in. */
1913 : Assert(io_buffers[0] == buffers[nblocks_done]);
1914 2422790 : io_pages[0] = BufferGetBlock(buffers[nblocks_done]);
1915 2422790 : io_buffers_len = 1;
1916 :
1917 : /*
1918 : * How many neighboring-on-disk blocks can we scatter-read into other
1919 : * buffers at the same time? In this case we don't wait if we see an
1920 : * I/O already in progress. We already set BM_IO_IN_PROGRESS for the
1921 : * head block, so we should get on with that I/O as soon as possible.
1922 : */
1923 2744626 : for (int i = nblocks_done + 1; i < operation->nblocks; i++)
1924 : {
1925 321836 : if (!ReadBuffersCanStartIO(buffers[i], true))
1926 0 : break;
1927 : /* Must be consecutive block numbers. */
1928 : Assert(BufferGetBlockNumber(buffers[i - 1]) ==
1929 : BufferGetBlockNumber(buffers[i]) - 1);
1930 : Assert(io_buffers[io_buffers_len] == buffers[i]);
1931 :
1932 321836 : io_pages[io_buffers_len++] = BufferGetBlock(buffers[i]);
1933 : }
1934 :
1935 : /* get a reference to wait for in WaitReadBuffers() */
1936 2422790 : pgaio_io_get_wref(ioh, &operation->io_wref);
1937 :
1938 : /* provide the list of buffers to the completion callbacks */
1939 2422790 : pgaio_io_set_handle_data_32(ioh, (uint32 *) io_buffers, io_buffers_len);
1940 :
1941 2422790 : pgaio_io_register_callbacks(ioh,
1942 : persistence == RELPERSISTENCE_TEMP ?
1943 : PGAIO_HCB_LOCAL_BUFFER_READV :
1944 : PGAIO_HCB_SHARED_BUFFER_READV,
1945 : flags);
1946 :
1947 2422790 : pgaio_io_set_flag(ioh, ioh_flags);
1948 :
1949 : /* ---
1950 : * Even though we're trying to issue IO asynchronously, track the time
1951 : * in smgrstartreadv():
1952 : * - if io_method == IOMETHOD_SYNC, we will always perform the IO
1953 : * immediately
1954 : * - the io method might not support the IO (e.g. worker IO for a temp
1955 : * table)
1956 : * ---
1957 : */
1958 2422790 : io_start = pgstat_prepare_io_time(track_io_timing);
1959 2422790 : smgrstartreadv(ioh, operation->smgr, forknum,
1960 : blocknum + nblocks_done,
1961 : io_pages, io_buffers_len);
1962 2422760 : pgstat_count_io_op_time(io_object, io_context, IOOP_READ,
1963 2422760 : io_start, 1, io_buffers_len * BLCKSZ);
1964 :
1965 2422760 : if (persistence == RELPERSISTENCE_TEMP)
1966 3544 : pgBufferUsage.local_blks_read += io_buffers_len;
1967 : else
1968 2419216 : pgBufferUsage.shared_blks_read += io_buffers_len;
1969 :
1970 : /*
1971 : * Track vacuum cost when issuing IO, not after waiting for it.
1972 : * Otherwise we could end up issuing a lot of IO in a short timespan,
1973 : * despite a low cost limit.
1974 : */
1975 2422760 : if (VacuumCostActive)
1976 34450 : VacuumCostBalance += VacuumCostPageMiss * io_buffers_len;
1977 :
1978 2422760 : *nblocks_progress = io_buffers_len;
1979 2422760 : did_start_io = true;
1980 : }
1981 :
1982 2436572 : return did_start_io;
1983 : }
1984 :
1985 : /*
1986 : * BufferAlloc -- subroutine for PinBufferForBlock. Handles lookup of a shared
1987 : * buffer. If no buffer exists already, selects a replacement victim and
1988 : * evicts the old page, but does NOT read in new page.
1989 : *
1990 : * "strategy" can be a buffer replacement strategy object, or NULL for
1991 : * the default strategy. The selected buffer's usage_count is advanced when
1992 : * using the default strategy, but otherwise possibly not (see PinBuffer).
1993 : *
1994 : * The returned buffer is pinned and is already marked as holding the
1995 : * desired page. If it already did have the desired page, *foundPtr is
1996 : * set true. Otherwise, *foundPtr is set false.
1997 : *
1998 : * io_context is passed as an output parameter to avoid calling
1999 : * IOContextForStrategy() when there is a shared buffers hit and no IO
2000 : * statistics need be captured.
2001 : *
2002 : * No locks are held either at entry or exit.
2003 : */
2004 : static pg_attribute_always_inline BufferDesc *
2005 119388486 : BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
2006 : BlockNumber blockNum,
2007 : BufferAccessStrategy strategy,
2008 : bool *foundPtr, IOContext io_context)
2009 : {
2010 : BufferTag newTag; /* identity of requested block */
2011 : uint32 newHash; /* hash value for newTag */
2012 : LWLock *newPartitionLock; /* buffer partition lock for it */
2013 : int existing_buf_id;
2014 : Buffer victim_buffer;
2015 : BufferDesc *victim_buf_hdr;
2016 : uint32 victim_buf_state;
2017 :
2018 : /* Make sure we will have room to remember the buffer pin */
2019 119388486 : ResourceOwnerEnlarge(CurrentResourceOwner);
2020 119388486 : ReservePrivateRefCountEntry();
2021 :
2022 : /* create a tag so we can lookup the buffer */
2023 119388486 : InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
2024 :
2025 : /* determine its hash code and partition lock ID */
2026 119388486 : newHash = BufTableHashCode(&newTag);
2027 119388486 : newPartitionLock = BufMappingPartitionLock(newHash);
2028 :
2029 : /* see if the block is in the buffer pool already */
2030 119388486 : LWLockAcquire(newPartitionLock, LW_SHARED);
2031 119388486 : existing_buf_id = BufTableLookup(&newTag, newHash);
2032 119388486 : if (existing_buf_id >= 0)
2033 : {
2034 : BufferDesc *buf;
2035 : bool valid;
2036 :
2037 : /*
2038 : * Found it. Now, pin the buffer so no one can steal it from the
2039 : * buffer pool, and check to see if the correct data has been loaded
2040 : * into the buffer.
2041 : */
2042 116107752 : buf = GetBufferDescriptor(existing_buf_id);
2043 :
2044 116107752 : valid = PinBuffer(buf, strategy);
2045 :
2046 : /* Can release the mapping lock as soon as we've pinned it */
2047 116107752 : LWLockRelease(newPartitionLock);
2048 :
2049 116107752 : *foundPtr = true;
2050 :
2051 116107752 : if (!valid)
2052 : {
2053 : /*
2054 : * We can only get here if (a) someone else is still reading in
2055 : * the page, (b) a previous read attempt failed, or (c) someone
2056 : * called StartReadBuffers() but not yet WaitReadBuffers().
2057 : */
2058 12112 : *foundPtr = false;
2059 : }
2060 :
2061 116107752 : return buf;
2062 : }
2063 :
2064 : /*
2065 : * Didn't find it in the buffer pool. We'll have to initialize a new
2066 : * buffer. Remember to unlock the mapping lock while doing the work.
2067 : */
2068 3280734 : LWLockRelease(newPartitionLock);
2069 :
2070 : /*
2071 : * Acquire a victim buffer. Somebody else might try to do the same, we
2072 : * don't hold any conflicting locks. If so we'll have to undo our work
2073 : * later.
2074 : */
2075 3280734 : victim_buffer = GetVictimBuffer(strategy, io_context);
2076 3280734 : victim_buf_hdr = GetBufferDescriptor(victim_buffer - 1);
2077 :
2078 : /*
2079 : * Try to make a hashtable entry for the buffer under its new tag. If
2080 : * somebody else inserted another buffer for the tag, we'll release the
2081 : * victim buffer we acquired and use the already inserted one.
2082 : */
2083 3280734 : LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
2084 3280734 : existing_buf_id = BufTableInsert(&newTag, newHash, victim_buf_hdr->buf_id);
2085 3280734 : if (existing_buf_id >= 0)
2086 : {
2087 : BufferDesc *existing_buf_hdr;
2088 : bool valid;
2089 :
2090 : /*
2091 : * Got a collision. Someone has already done what we were about to do.
2092 : * We'll just handle this as if it were found in the buffer pool in
2093 : * the first place. First, give up the buffer we were planning to
2094 : * use.
2095 : *
2096 : * We could do this after releasing the partition lock, but then we'd
2097 : * have to call ResourceOwnerEnlarge() & ReservePrivateRefCountEntry()
2098 : * before acquiring the lock, for the rare case of such a collision.
2099 : */
2100 2352 : UnpinBuffer(victim_buf_hdr);
2101 :
2102 : /*
2103 : * The victim buffer we acquired previously is clean and unused, let
2104 : * it be found again quickly
2105 : */
2106 2352 : StrategyFreeBuffer(victim_buf_hdr);
2107 :
2108 : /* remaining code should match code at top of routine */
2109 :
2110 2352 : existing_buf_hdr = GetBufferDescriptor(existing_buf_id);
2111 :
2112 2352 : valid = PinBuffer(existing_buf_hdr, strategy);
2113 :
2114 : /* Can release the mapping lock as soon as we've pinned it */
2115 2352 : LWLockRelease(newPartitionLock);
2116 :
2117 2352 : *foundPtr = true;
2118 :
2119 2352 : if (!valid)
2120 : {
2121 : /*
2122 : * We can only get here if (a) someone else is still reading in
2123 : * the page, (b) a previous read attempt failed, or (c) someone
2124 : * called StartReadBuffers() but not yet WaitReadBuffers().
2125 : */
2126 1796 : *foundPtr = false;
2127 : }
2128 :
2129 2352 : return existing_buf_hdr;
2130 : }
2131 :
2132 : /*
2133 : * Need to lock the buffer header too in order to change its tag.
2134 : */
2135 3278382 : victim_buf_state = LockBufHdr(victim_buf_hdr);
2136 :
2137 : /* some sanity checks while we hold the buffer header lock */
2138 : Assert(BUF_STATE_GET_REFCOUNT(victim_buf_state) == 1);
2139 : Assert(!(victim_buf_state & (BM_TAG_VALID | BM_VALID | BM_DIRTY | BM_IO_IN_PROGRESS)));
2140 :
2141 3278382 : victim_buf_hdr->tag = newTag;
2142 :
2143 : /*
2144 : * Make sure BM_PERMANENT is set for buffers that must be written at every
2145 : * checkpoint. Unlogged buffers only need to be written at shutdown
2146 : * checkpoints, except for their "init" forks, which need to be treated
2147 : * just like permanent relations.
2148 : */
2149 3278382 : victim_buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
2150 3278382 : if (relpersistence == RELPERSISTENCE_PERMANENT || forkNum == INIT_FORKNUM)
2151 3277698 : victim_buf_state |= BM_PERMANENT;
2152 :
2153 3278382 : UnlockBufHdr(victim_buf_hdr, victim_buf_state);
2154 :
2155 3278382 : LWLockRelease(newPartitionLock);
2156 :
2157 : /*
2158 : * Buffer contents are currently invalid.
2159 : */
2160 3278382 : *foundPtr = false;
2161 :
2162 3278382 : return victim_buf_hdr;
2163 : }
2164 :
2165 : /*
2166 : * InvalidateBuffer -- mark a shared buffer invalid and return it to the
2167 : * freelist.
2168 : *
2169 : * The buffer header spinlock must be held at entry. We drop it before
2170 : * returning. (This is sane because the caller must have locked the
2171 : * buffer in order to be sure it should be dropped.)
2172 : *
2173 : * This is used only in contexts such as dropping a relation. We assume
2174 : * that no other backend could possibly be interested in using the page,
2175 : * so the only reason the buffer might be pinned is if someone else is
2176 : * trying to write it out. We have to let them finish before we can
2177 : * reclaim the buffer.
2178 : *
2179 : * The buffer could get reclaimed by someone else while we are waiting
2180 : * to acquire the necessary locks; if so, don't mess it up.
2181 : */
2182 : static void
2183 211506 : InvalidateBuffer(BufferDesc *buf)
2184 : {
2185 : BufferTag oldTag;
2186 : uint32 oldHash; /* hash value for oldTag */
2187 : LWLock *oldPartitionLock; /* buffer partition lock for it */
2188 : uint32 oldFlags;
2189 : uint32 buf_state;
2190 :
2191 : /* Save the original buffer tag before dropping the spinlock */
2192 211506 : oldTag = buf->tag;
2193 :
2194 211506 : buf_state = pg_atomic_read_u32(&buf->state);
2195 : Assert(buf_state & BM_LOCKED);
2196 211506 : UnlockBufHdr(buf, buf_state);
2197 :
2198 : /*
2199 : * Need to compute the old tag's hashcode and partition lock ID. XXX is it
2200 : * worth storing the hashcode in BufferDesc so we need not recompute it
2201 : * here? Probably not.
2202 : */
2203 211506 : oldHash = BufTableHashCode(&oldTag);
2204 211506 : oldPartitionLock = BufMappingPartitionLock(oldHash);
2205 :
2206 211508 : retry:
2207 :
2208 : /*
2209 : * Acquire exclusive mapping lock in preparation for changing the buffer's
2210 : * association.
2211 : */
2212 211508 : LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
2213 :
2214 : /* Re-lock the buffer header */
2215 211508 : buf_state = LockBufHdr(buf);
2216 :
2217 : /* If it's changed while we were waiting for lock, do nothing */
2218 211508 : if (!BufferTagsEqual(&buf->tag, &oldTag))
2219 : {
2220 4 : UnlockBufHdr(buf, buf_state);
2221 4 : LWLockRelease(oldPartitionLock);
2222 4 : return;
2223 : }
2224 :
2225 : /*
2226 : * We assume the reason for it to be pinned is that either we were
2227 : * asynchronously reading the page in before erroring out or someone else
2228 : * is flushing the page out. Wait for the IO to finish. (This could be
2229 : * an infinite loop if the refcount is messed up... it would be nice to
2230              :  * time out after a while, but there seems no way to be sure how many loops
2231 : * may be needed. Note that if the other guy has pinned the buffer but
2232 : * not yet done StartBufferIO, WaitIO will fall through and we'll
2233 : * effectively be busy-looping here.)
2234 : */
2235 211504 : if (BUF_STATE_GET_REFCOUNT(buf_state) != 0)
2236 : {
2237 2 : UnlockBufHdr(buf, buf_state);
2238 2 : LWLockRelease(oldPartitionLock);
2239 : /* safety check: should definitely not be our *own* pin */
2240 2 : if (GetPrivateRefCount(BufferDescriptorGetBuffer(buf)) > 0)
2241 0 : elog(ERROR, "buffer is pinned in InvalidateBuffer");
2242 2 : WaitIO(buf);
2243 2 : goto retry;
2244 : }
2245 :
2246 : /*
2247 : * Clear out the buffer's tag and flags. We must do this to ensure that
2248 : * linear scans of the buffer array don't think the buffer is valid.
2249 : */
2250 211502 : oldFlags = buf_state & BUF_FLAG_MASK;
2251 211502 : ClearBufferTag(&buf->tag);
2252 211502 : buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
2253 211502 : UnlockBufHdr(buf, buf_state);
2254 :
2255 : /*
2256 : * Remove the buffer from the lookup hashtable, if it was in there.
2257 : */
2258 211502 : if (oldFlags & BM_TAG_VALID)
2259 211502 : BufTableDelete(&oldTag, oldHash);
2260 :
2261 : /*
2262 : * Done with mapping lock.
2263 : */
2264 211502 : LWLockRelease(oldPartitionLock);
2265 :
2266 : /*
2267 : * Insert the buffer at the head of the list of free buffers.
2268 : */
2269 211502 : StrategyFreeBuffer(buf);
2270 : }
2271 :
2272 : /*
2273 : * Helper routine for GetVictimBuffer()
2274 : *
2275 : * Needs to be called on a buffer with a valid tag, pinned, but without the
2276 : * buffer header spinlock held.
2277 : *
2278 : * Returns true if the buffer can be reused, in which case the buffer is only
2279 : * pinned by this backend and marked as invalid, false otherwise.
2280 : */
2281 : static bool
2282 2292484 : InvalidateVictimBuffer(BufferDesc *buf_hdr)
2283 : {
2284 : uint32 buf_state;
2285 : uint32 hash;
2286 : LWLock *partition_lock;
2287 : BufferTag tag;
2288 :
2289 : Assert(GetPrivateRefCount(BufferDescriptorGetBuffer(buf_hdr)) == 1);
2290 :
2291 : /* have buffer pinned, so it's safe to read tag without lock */
2292 2292484 : tag = buf_hdr->tag;
2293 :
2294 2292484 : hash = BufTableHashCode(&tag);
2295 2292484 : partition_lock = BufMappingPartitionLock(hash);
2296 :
2297 2292484 : LWLockAcquire(partition_lock, LW_EXCLUSIVE);
2298 :
2299 : /* lock the buffer header */
2300 2292484 : buf_state = LockBufHdr(buf_hdr);
2301 :
2302 : /*
2303              :  * We have the buffer pinned, so nobody else should have been able to
2304              :  * unset this concurrently.
2305 : */
2306 : Assert(buf_state & BM_TAG_VALID);
2307 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2308 : Assert(BufferTagsEqual(&buf_hdr->tag, &tag));
2309 :
2310 : /*
2311 : * If somebody else pinned the buffer since, or even worse, dirtied it,
2312 : * give up on this buffer: It's clearly in use.
2313 : */
2314 2292484 : if (BUF_STATE_GET_REFCOUNT(buf_state) != 1 || (buf_state & BM_DIRTY))
2315 : {
2316 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2317 :
2318 674 : UnlockBufHdr(buf_hdr, buf_state);
2319 674 : LWLockRelease(partition_lock);
2320 :
2321 674 : return false;
2322 : }
2323 :
2324 : /*
2325 : * Clear out the buffer's tag and flags and usagecount. This is not
2326 : * strictly required, as BM_TAG_VALID/BM_VALID needs to be checked before
2327 : * doing anything with the buffer. But currently it's beneficial, as the
2328 : * cheaper pre-check for several linear scans of shared buffers use the
2329              :  * cheaper pre-check for several linear scans of shared buffers uses the
2330 : */
2331 2291810 : ClearBufferTag(&buf_hdr->tag);
2332 2291810 : buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
2333 2291810 : UnlockBufHdr(buf_hdr, buf_state);
2334 :
2335 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2336 :
2337 : /* finally delete buffer from the buffer mapping table */
2338 2291810 : BufTableDelete(&tag, hash);
2339 :
2340 2291810 : LWLockRelease(partition_lock);
2341 :
2342 : Assert(!(buf_state & (BM_DIRTY | BM_VALID | BM_TAG_VALID)));
2343 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2344 : Assert(BUF_STATE_GET_REFCOUNT(pg_atomic_read_u32(&buf_hdr->state)) > 0);
2345 :
2346 2291810 : return true;
2347 : }
2348 :
2349 : static Buffer
2350 3738290 : GetVictimBuffer(BufferAccessStrategy strategy, IOContext io_context)
2351 : {
2352 : BufferDesc *buf_hdr;
2353 : Buffer buf;
2354 : uint32 buf_state;
2355 : bool from_ring;
2356 :
2357 : /*
2358 : * Ensure, while the spinlock's not yet held, that there's a free refcount
2359 : * entry, and a resource owner slot for the pin.
2360 : */
2361 3738290 : ReservePrivateRefCountEntry();
2362 3738290 : ResourceOwnerEnlarge(CurrentResourceOwner);
2363 :
2364 : /* we return here if a prospective victim buffer gets used concurrently */
2365 11718 : again:
2366 :
2367 : /*
2368 : * Select a victim buffer. The buffer is returned with its header
2369 : * spinlock still held!
2370 : */
2371 3750008 : buf_hdr = StrategyGetBuffer(strategy, &buf_state, &from_ring);
2372 3750008 : buf = BufferDescriptorGetBuffer(buf_hdr);
2373 :
2374 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 0);
2375 :
2376 : /* Pin the buffer and then release the buffer spinlock */
2377 3750008 : PinBuffer_Locked(buf_hdr);
2378 :
2379 : /*
2380 : * We shouldn't have any other pins for this buffer.
2381 : */
2382 3750008 : CheckBufferIsPinnedOnce(buf);
2383 :
2384 : /*
2385 : * If the buffer was dirty, try to write it out. There is a race
2386 : * condition here, in that someone might dirty it after we released the
2387 : * buffer header lock above, or even while we are writing it out (since
2388 : * our share-lock won't prevent hint-bit updates). We will recheck the
2389 : * dirty bit after re-locking the buffer header.
2390 : */
2391 3750008 : if (buf_state & BM_DIRTY)
2392 : {
2393 : LWLock *content_lock;
2394 :
2395 : Assert(buf_state & BM_TAG_VALID);
2396 : Assert(buf_state & BM_VALID);
2397 :
2398 : /*
2399 : * We need a share-lock on the buffer contents to write it out (else
2400 : * we might write invalid data, eg because someone else is compacting
2401              :  * we might write invalid data, e.g. because someone else is compacting
2402 : * acquisition here to avoid deadlock. Even though the buffer was not
2403 : * pinned (and therefore surely not locked) when StrategyGetBuffer
2404 : * returned it, someone else could have pinned and exclusive-locked it
2405 : * by the time we get here. If we try to get the lock unconditionally,
2406 : * we'd block waiting for them; if they later block waiting for us,
2407 : * deadlock ensues. (This has been observed to happen when two
2408 : * backends are both trying to split btree index pages, and the second
2409 : * one just happens to be trying to split the page the first one got
2410 : * from StrategyGetBuffer.)
2411 : */
2412 521842 : content_lock = BufferDescriptorGetContentLock(buf_hdr);
2413 521842 : if (!LWLockConditionalAcquire(content_lock, LW_SHARED))
2414 : {
2415 : /*
2416 : * Someone else has locked the buffer, so give it up and loop back
2417 : * to get another one.
2418 : */
2419 0 : UnpinBuffer(buf_hdr);
2420 0 : goto again;
2421 : }
2422 :
2423 : /*
2424 : * If using a nondefault strategy, and writing the buffer would
2425 : * require a WAL flush, let the strategy decide whether to go ahead
2426 : * and write/reuse the buffer or to choose another victim. We need a
2427 : * lock to inspect the page LSN, so this can't be done inside
2428 : * StrategyGetBuffer.
2429 : */
2430 521842 : if (strategy != NULL)
2431 : {
2432 : XLogRecPtr lsn;
2433 :
2434 : /* Read the LSN while holding buffer header lock */
2435 145508 : buf_state = LockBufHdr(buf_hdr);
2436 145508 : lsn = BufferGetLSN(buf_hdr);
2437 145508 : UnlockBufHdr(buf_hdr, buf_state);
2438 :
2439 145508 : if (XLogNeedsFlush(lsn)
2440 16960 : && StrategyRejectBuffer(strategy, buf_hdr, from_ring))
2441 : {
2442 11044 : LWLockRelease(content_lock);
2443 11044 : UnpinBuffer(buf_hdr);
2444 11044 : goto again;
2445 : }
2446 : }
2447 :
2448 : /* OK, do the I/O */
2449 510798 : FlushBuffer(buf_hdr, NULL, IOOBJECT_RELATION, io_context);
2450 510798 : LWLockRelease(content_lock);
2451 :
2452 510798 : ScheduleBufferTagForWriteback(&BackendWritebackContext, io_context,
2453 : &buf_hdr->tag);
2454 : }
2455 :
2456 :
2457 3738964 : if (buf_state & BM_VALID)
2458 : {
2459 : /*
2460 : * When a BufferAccessStrategy is in use, blocks evicted from shared
2461 : * buffers are counted as IOOP_EVICT in the corresponding context
2462 : * (e.g. IOCONTEXT_BULKWRITE). Shared buffers are evicted by a
2463 : * strategy in two cases: 1) while initially claiming buffers for the
2464 : * strategy ring 2) to replace an existing strategy ring buffer
2465 : * because it is pinned or in use and cannot be reused.
2466 : *
2467 : * Blocks evicted from buffers already in the strategy ring are
2468 : * counted as IOOP_REUSE in the corresponding strategy context.
2469 : *
2470 : * At this point, we can accurately count evictions and reuses,
2471 : * because we have successfully claimed the valid buffer. Previously,
2472 : * we may have been forced to release the buffer due to concurrent
2473 : * pinners or erroring out.
2474 : */
2475 2288218 : pgstat_count_io_op(IOOBJECT_RELATION, io_context,
2476 2288218 : from_ring ? IOOP_REUSE : IOOP_EVICT, 1, 0);
2477 : }
2478 :
2479 : /*
2480 : * If the buffer has an entry in the buffer mapping table, delete it. This
2481 : * can fail because another backend could have pinned or dirtied the
2482 : * buffer.
2483 : */
2484 3738964 : if ((buf_state & BM_TAG_VALID) && !InvalidateVictimBuffer(buf_hdr))
2485 : {
2486 674 : UnpinBuffer(buf_hdr);
2487 674 : goto again;
2488 : }
2489 :
2490 : /* a final set of sanity checks */
2491 : #ifdef USE_ASSERT_CHECKING
2492 : buf_state = pg_atomic_read_u32(&buf_hdr->state);
2493 :
2494 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 1);
2495 : Assert(!(buf_state & (BM_TAG_VALID | BM_VALID | BM_DIRTY)));
2496 :
2497 : CheckBufferIsPinnedOnce(buf);
2498 : #endif
2499 :
2500 3738290 : return buf;
2501 : }
2502 :
2503 : /*
2504 : * Return the maximum number of buffers that a backend should try to pin once,
2505              :  * Return the maximum number of buffers that a backend should try to pin at once,
2506 : * GetAdditionalPinLimit() could ever return. Note that it may be zero on a
2507 : * system with a very small buffer pool relative to max_connections.
2508 : */
2509 : uint32
2510 1204844 : GetPinLimit(void)
2511 : {
2512 1204844 : return MaxProportionalPins;
2513 : }
2514 :
2515 : /*
2516 : * Return the maximum number of additional buffers that this backend should
2517 : * pin if it wants to stay under the per-backend limit, considering the number
2518 : * of buffers it has already pinned. Unlike LimitAdditionalPins(), the limit
2519 : * return by this function can be zero.
2520              :  * returned by this function can be zero.
2521 : uint32
2522 6746656 : GetAdditionalPinLimit(void)
2523 : {
2524 : uint32 estimated_pins_held;
2525 :
2526 : /*
2527 : * We get the number of "overflowed" pins for free, but don't know the
2528 : * number of pins in PrivateRefCountArray. The cost of calculating that
2529 : * exactly doesn't seem worth it, so just assume the max.
2530 : */
2531 6746656 : estimated_pins_held = PrivateRefCountOverflowed + REFCOUNT_ARRAY_ENTRIES;
2532 :
2533 : /* Is this backend already holding more than its fair share? */
2534 6746656 : if (estimated_pins_held > MaxProportionalPins)
2535 2302038 : return 0;
2536 :
2537 4444618 : return MaxProportionalPins - estimated_pins_held;
2538 : }
2539 :
2540 : /*
2541 : * Limit the number of pins a batch operation may additionally acquire, to
2542 : * avoid running out of pinnable buffers.
2543 : *
2544 : * One additional pin is always allowed, on the assumption that the operation
2545 : * requires at least one to make progress.
2546 : */
2547 : void
2548 405202 : LimitAdditionalPins(uint32 *additional_pins)
2549 : {
2550 : uint32 limit;
2551 :
2552 405202 : if (*additional_pins <= 1)
2553 384670 : return;
2554 :
2555 20532 : limit = GetAdditionalPinLimit();
2556 20532 : limit = Max(limit, 1);
2557 20532 : if (limit < *additional_pins)
2558 10930 : *additional_pins = limit;
2559 : }
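                  : /*
                  :  * Editor's note: an illustrative sketch (not part of bufmgr.c) of how a
                  :  * batch operation might combine these limits; "desired" is a hypothetical
                  :  * caller-chosen batch size of at least one:
                  :  *
                  :  *     uint32  npins = desired;
                  :  *
                  :  *     LimitAdditionalPins(&npins);   // clamps to the fair share, never below 1
                  :  *     // ... pin and process at most npins buffers in this batch ...
                  :  */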
2560 :
2561 : /*
2562 : * Logic shared between ExtendBufferedRelBy(), ExtendBufferedRelTo(). Just to
2563 : * avoid duplicating the tracing and relpersistence related logic.
2564 : */
2565 : static BlockNumber
2566 427906 : ExtendBufferedRelCommon(BufferManagerRelation bmr,
2567 : ForkNumber fork,
2568 : BufferAccessStrategy strategy,
2569 : uint32 flags,
2570 : uint32 extend_by,
2571 : BlockNumber extend_upto,
2572 : Buffer *buffers,
2573 : uint32 *extended_by)
2574 : {
2575 : BlockNumber first_block;
2576 :
2577 : TRACE_POSTGRESQL_BUFFER_EXTEND_START(fork,
2578 : bmr.smgr->smgr_rlocator.locator.spcOid,
2579 : bmr.smgr->smgr_rlocator.locator.dbOid,
2580 : bmr.smgr->smgr_rlocator.locator.relNumber,
2581 : bmr.smgr->smgr_rlocator.backend,
2582 : extend_by);
2583 :
2584 427906 : if (bmr.relpersistence == RELPERSISTENCE_TEMP)
2585 22704 : first_block = ExtendBufferedRelLocal(bmr, fork, flags,
2586 : extend_by, extend_upto,
2587 : buffers, &extend_by);
2588 : else
2589 405202 : first_block = ExtendBufferedRelShared(bmr, fork, strategy, flags,
2590 : extend_by, extend_upto,
2591 : buffers, &extend_by);
2592 427906 : *extended_by = extend_by;
2593 :
2594 : TRACE_POSTGRESQL_BUFFER_EXTEND_DONE(fork,
2595 : bmr.smgr->smgr_rlocator.locator.spcOid,
2596 : bmr.smgr->smgr_rlocator.locator.dbOid,
2597 : bmr.smgr->smgr_rlocator.locator.relNumber,
2598 : bmr.smgr->smgr_rlocator.backend,
2599 : *extended_by,
2600 : first_block);
2601 :
2602 427906 : return first_block;
2603 : }
2604 :
2605 : /*
2606 : * Implementation of ExtendBufferedRelBy() and ExtendBufferedRelTo() for
2607 : * shared buffers.
2608 : */
2609 : static BlockNumber
2610 405202 : ExtendBufferedRelShared(BufferManagerRelation bmr,
2611 : ForkNumber fork,
2612 : BufferAccessStrategy strategy,
2613 : uint32 flags,
2614 : uint32 extend_by,
2615 : BlockNumber extend_upto,
2616 : Buffer *buffers,
2617 : uint32 *extended_by)
2618 : {
2619 : BlockNumber first_block;
2620 405202 : IOContext io_context = IOContextForStrategy(strategy);
2621 : instr_time io_start;
2622 :
2623 405202 : LimitAdditionalPins(&extend_by);
2624 :
2625 : /*
2626 : * Acquire victim buffers for extension without holding extension lock.
2627 : * Writing out victim buffers is the most expensive part of extending the
2628 : * relation, particularly when doing so requires WAL flushes. Zeroing out
2629 : * the buffers is also quite expensive, so do that before holding the
2630 : * extension lock as well.
2631 : *
2632 : * These pages are pinned by us and not valid. While we hold the pin they
2633 : * can't be acquired as victim buffers by another backend.
2634 : */
2635 862758 : for (uint32 i = 0; i < extend_by; i++)
2636 : {
2637 : Block buf_block;
2638 :
2639 457556 : buffers[i] = GetVictimBuffer(strategy, io_context);
2640 457556 : buf_block = BufHdrGetBlock(GetBufferDescriptor(buffers[i] - 1));
2641 :
2642 : /* new buffers are zero-filled */
2643 457556 : MemSet(buf_block, 0, BLCKSZ);
2644 : }
2645 :
2646 : /*
2647 : * Lock relation against concurrent extensions, unless requested not to.
2648 : *
2649 : * We use the same extension lock for all forks. That's unnecessarily
2650 : * restrictive, but currently extensions for forks don't happen often
2651 : * enough to make it worth locking more granularly.
2652 : *
2653 : * Note that another backend might have extended the relation by the time
2654 : * we get the lock.
2655 : */
2656 405202 : if (!(flags & EB_SKIP_EXTENSION_LOCK))
2657 301726 : LockRelationForExtension(bmr.rel, ExclusiveLock);
2658 :
2659 : /*
2660 : * If requested, invalidate size cache, so that smgrnblocks asks the
2661 : * kernel.
2662 : */
2663 405202 : if (flags & EB_CLEAR_SIZE_CACHE)
2664 15566 : bmr.smgr->smgr_cached_nblocks[fork] = InvalidBlockNumber;
2665 :
2666 405202 : first_block = smgrnblocks(bmr.smgr, fork);
2667 :
2668 : /*
2669 : * Now that we have the accurate relation size, check if the caller wants
2670 : * us to extend to only up to a specific size. If there were concurrent
2671 : * extensions, we might have acquired too many buffers and need to release
2672 : * them.
2673 : */
2674 405202 : if (extend_upto != InvalidBlockNumber)
2675 : {
2676 106394 : uint32 orig_extend_by = extend_by;
2677 :
2678 106394 : if (first_block > extend_upto)
2679 0 : extend_by = 0;
2680 106394 : else if ((uint64) first_block + extend_by > extend_upto)
2681 16 : extend_by = extend_upto - first_block;
2682 :
2683 106426 : for (uint32 i = extend_by; i < orig_extend_by; i++)
2684 : {
2685 32 : BufferDesc *buf_hdr = GetBufferDescriptor(buffers[i] - 1);
2686 :
2687 : /*
2688 : * The victim buffer we acquired previously is clean and unused,
2689 : * let it be found again quickly
2690 : */
2691 32 : StrategyFreeBuffer(buf_hdr);
2692 32 : UnpinBuffer(buf_hdr);
2693 : }
2694 :
2695 106394 : if (extend_by == 0)
2696 : {
2697 16 : if (!(flags & EB_SKIP_EXTENSION_LOCK))
2698 16 : UnlockRelationForExtension(bmr.rel, ExclusiveLock);
2699 16 : *extended_by = extend_by;
2700 16 : return first_block;
2701 : }
2702 : }
2703 :
2704 : /* Fail if relation is already at maximum possible length */
2705 405186 : if ((uint64) first_block + extend_by >= MaxBlockNumber)
2706 0 : ereport(ERROR,
2707 : (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
2708 : errmsg("cannot extend relation %s beyond %u blocks",
2709 : relpath(bmr.smgr->smgr_rlocator, fork).str,
2710 : MaxBlockNumber)));
2711 :
2712 : /*
2713 : * Insert buffers into buffer table, mark as IO_IN_PROGRESS.
2714 : *
2715 : * This needs to happen before we extend the relation, because as soon as
2716 : * we do, other backends can start to read in those pages.
2717 : */
2718 862710 : for (uint32 i = 0; i < extend_by; i++)
2719 : {
2720 457524 : Buffer victim_buf = buffers[i];
2721 457524 : BufferDesc *victim_buf_hdr = GetBufferDescriptor(victim_buf - 1);
2722 : BufferTag tag;
2723 : uint32 hash;
2724 : LWLock *partition_lock;
2725 : int existing_id;
2726 :
2727 : /* in case we need to pin an existing buffer below */
2728 457524 : ResourceOwnerEnlarge(CurrentResourceOwner);
2729 457524 : ReservePrivateRefCountEntry();
2730 :
2731 457524 : InitBufferTag(&tag, &bmr.smgr->smgr_rlocator.locator, fork, first_block + i);
2732 457524 : hash = BufTableHashCode(&tag);
2733 457524 : partition_lock = BufMappingPartitionLock(hash);
2734 :
2735 457524 : LWLockAcquire(partition_lock, LW_EXCLUSIVE);
2736 :
2737 457524 : existing_id = BufTableInsert(&tag, hash, victim_buf_hdr->buf_id);
2738 :
2739 : /*
2740 : * We get here only in the corner case where we are trying to extend
2741 : * the relation but we found a pre-existing buffer. This can happen
2742 : * because a prior attempt at extending the relation failed, and
2743 : * because mdread doesn't complain about reads beyond EOF (when
2744 : * zero_damaged_pages is ON) and so a previous attempt to read a block
2745 : * beyond EOF could have left a "valid" zero-filled buffer.
2746 : * Unfortunately, we have also seen this case occurring because of
2747 : * buggy Linux kernels that sometimes return an lseek(SEEK_END) result
2748 : * that doesn't account for a recent write. In that situation, the
2749 : * pre-existing buffer would contain valid data that we don't want to
2750 : * overwrite. Since the legitimate cases should always have left a
2751 : * zero-filled buffer, complain if not PageIsNew.
2752 : */
2753 457524 : if (existing_id >= 0)
2754 : {
2755 0 : BufferDesc *existing_hdr = GetBufferDescriptor(existing_id);
2756 : Block buf_block;
2757 : bool valid;
2758 :
2759 : /*
2760 : * Pin the existing buffer before releasing the partition lock,
2761 : * preventing it from being evicted.
2762 : */
2763 0 : valid = PinBuffer(existing_hdr, strategy);
2764 :
2765 0 : LWLockRelease(partition_lock);
2766 :
2767 : /*
2768 : * The victim buffer we acquired previously is clean and unused,
2769 : * let it be found again quickly
2770 : */
2771 0 : StrategyFreeBuffer(victim_buf_hdr);
2772 0 : UnpinBuffer(victim_buf_hdr);
2773 :
2774 0 : buffers[i] = BufferDescriptorGetBuffer(existing_hdr);
2775 0 : buf_block = BufHdrGetBlock(existing_hdr);
2776 :
2777 0 : if (valid && !PageIsNew((Page) buf_block))
2778 0 : ereport(ERROR,
2779 : (errmsg("unexpected data beyond EOF in block %u of relation %s",
2780 : existing_hdr->tag.blockNum,
2781 : relpath(bmr.smgr->smgr_rlocator, fork).str),
2782 : errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
2783 :
2784 : /*
2785 : * We *must* do smgr[zero]extend before succeeding, else the page
2786 : * will not be reserved by the kernel, and the next P_NEW call
2787 : * will decide to return the same page. Clear the BM_VALID bit,
2788 : * do StartBufferIO() and proceed.
2789 : *
2790 : * Loop to handle the very small possibility that someone re-sets
2791 : * BM_VALID between our clearing it and StartBufferIO inspecting
2792 : * it.
2793 : */
2794 : do
2795 : {
2796 0 : uint32 buf_state = LockBufHdr(existing_hdr);
2797 :
2798 0 : buf_state &= ~BM_VALID;
2799 0 : UnlockBufHdr(existing_hdr, buf_state);
2800 0 : } while (!StartBufferIO(existing_hdr, true, false));
2801 : }
2802 : else
2803 : {
2804 : uint32 buf_state;
2805 :
2806 457524 : buf_state = LockBufHdr(victim_buf_hdr);
2807 :
2808 : /* some sanity checks while we hold the buffer header lock */
2809 : Assert(!(buf_state & (BM_VALID | BM_TAG_VALID | BM_DIRTY | BM_JUST_DIRTIED)));
2810 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 1);
2811 :
2812 457524 : victim_buf_hdr->tag = tag;
2813 :
2814 457524 : buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
2815 457524 : if (bmr.relpersistence == RELPERSISTENCE_PERMANENT || fork == INIT_FORKNUM)
2816 446954 : buf_state |= BM_PERMANENT;
2817 :
2818 457524 : UnlockBufHdr(victim_buf_hdr, buf_state);
2819 :
2820 457524 : LWLockRelease(partition_lock);
2821 :
2822 : /* XXX: could combine the locked operations in it with the above */
2823 457524 : StartBufferIO(victim_buf_hdr, true, false);
2824 : }
2825 : }
2826 :
2827 405186 : io_start = pgstat_prepare_io_time(track_io_timing);
2828 :
2829 : /*
2830 : * Note: if smgrzeroextend fails, we will end up with buffers that are
2831 : * allocated but not marked BM_VALID. The next relation extension will
2832 : * still select the same block number (because the relation didn't get any
2833 : * longer on disk) and so future attempts to extend the relation will find
2834 : * the same buffers (if they have not been recycled) but come right back
2835 : * here to try smgrzeroextend again.
2836 : *
2837 : * We don't need to set checksum for all-zero pages.
2838 : */
2839 405186 : smgrzeroextend(bmr.smgr, fork, first_block, extend_by, false);
2840 :
2841 : /*
2842 : * Release the file-extension lock; it's now OK for someone else to extend
2843 : * the relation some more.
2844 : *
2845 : * We remove IO_IN_PROGRESS after this, as waking up waiting backends can
2846 : * take noticeable time.
2847 : */
2848 405186 : if (!(flags & EB_SKIP_EXTENSION_LOCK))
2849 301710 : UnlockRelationForExtension(bmr.rel, ExclusiveLock);
2850 :
2851 405186 : pgstat_count_io_op_time(IOOBJECT_RELATION, io_context, IOOP_EXTEND,
2852 405186 : io_start, 1, extend_by * BLCKSZ);
2853 :
2854 : /* Set BM_VALID, terminate IO, and wake up any waiters */
2855 862710 : for (uint32 i = 0; i < extend_by; i++)
2856 : {
2857 457524 : Buffer buf = buffers[i];
2858 457524 : BufferDesc *buf_hdr = GetBufferDescriptor(buf - 1);
2859 457524 : bool lock = false;
2860 :
2861 457524 : if (flags & EB_LOCK_FIRST && i == 0)
2862 298222 : lock = true;
2863 159302 : else if (flags & EB_LOCK_TARGET)
2864 : {
2865 : Assert(extend_upto != InvalidBlockNumber);
2866 88712 : if (first_block + i + 1 == extend_upto)
2867 87568 : lock = true;
2868 : }
2869 :
2870 457524 : if (lock)
2871 385790 : LWLockAcquire(BufferDescriptorGetContentLock(buf_hdr), LW_EXCLUSIVE);
2872 :
2873 457524 : TerminateBufferIO(buf_hdr, false, BM_VALID, true, false);
2874 : }
2875 :
2876 405186 : pgBufferUsage.shared_blks_written += extend_by;
2877 :
2878 405186 : *extended_by = extend_by;
2879 :
2880 405186 : return first_block;
2881 : }
2882 :
2883 : /*
2884 : * BufferIsExclusiveLocked
2885 : *
2886 : * Checks if buffer is exclusive-locked.
2887 : *
2888 : * Buffer must be pinned.
2889 : */
2890 : bool
2891 0 : BufferIsExclusiveLocked(Buffer buffer)
2892 : {
2893 : BufferDesc *bufHdr;
2894 :
2895 : Assert(BufferIsPinned(buffer));
2896 :
2897 0 : if (BufferIsLocal(buffer))
2898 : {
2899 : /* Content locks are not maintained for local buffers. */
2900 0 : return true;
2901 : }
2902 : else
2903 : {
2904 0 : bufHdr = GetBufferDescriptor(buffer - 1);
2905 0 : return LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
2906 : LW_EXCLUSIVE);
2907 : }
2908 : }
2909 :
2910 : /*
2911 : * BufferIsDirty
2912 : *
2913 : * Checks if buffer is already dirty.
2914 : *
2915 : * Buffer must be pinned and exclusive-locked. (Without an exclusive lock,
2916 : * the result may be stale before it's returned.)
2917 : */
2918 : bool
2919 0 : BufferIsDirty(Buffer buffer)
2920 : {
2921 : BufferDesc *bufHdr;
2922 :
2923 : Assert(BufferIsPinned(buffer));
2924 :
2925 0 : if (BufferIsLocal(buffer))
2926 : {
2927 0 : int bufid = -buffer - 1;
2928 :
2929 0 : bufHdr = GetLocalBufferDescriptor(bufid);
2930 : /* Content locks are not maintained for local buffers. */
2931 : }
2932 : else
2933 : {
2934 0 : bufHdr = GetBufferDescriptor(buffer - 1);
2935 : Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
2936 : LW_EXCLUSIVE));
2937 : }
2938 :
2939 0 : return pg_atomic_read_u32(&bufHdr->state) & BM_DIRTY;
2940 : }
2941 :
2942 : /*
2943 : * MarkBufferDirty
2944 : *
2945 : * Marks buffer contents as dirty (actual write happens later).
2946 : *
2947 : * Buffer must be pinned and exclusive-locked. (If caller does not hold
2948 : * exclusive lock, then somebody could be in process of writing the buffer,
2949 : * leading to risk of bad data written to disk.)
2950 : */
2951 : void
2952 43348150 : MarkBufferDirty(Buffer buffer)
2953 : {
2954 : BufferDesc *bufHdr;
2955 : uint32 buf_state;
2956 : uint32 old_buf_state;
2957 :
2958 43348150 : if (!BufferIsValid(buffer))
2959 0 : elog(ERROR, "bad buffer ID: %d", buffer);
2960 :
2961 43348150 : if (BufferIsLocal(buffer))
2962 : {
2963 2433254 : MarkLocalBufferDirty(buffer);
2964 2433254 : return;
2965 : }
2966 :
2967 40914896 : bufHdr = GetBufferDescriptor(buffer - 1);
2968 :
2969 : Assert(BufferIsPinned(buffer));
2970 : Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
2971 : LW_EXCLUSIVE));
2972 :
2973 40914896 : old_buf_state = pg_atomic_read_u32(&bufHdr->state);
2974 : for (;;)
2975 : {
2976 40915120 : if (old_buf_state & BM_LOCKED)
2977 72 : old_buf_state = WaitBufHdrUnlocked(bufHdr);
2978 :
2979 40915120 : buf_state = old_buf_state;
2980 :
2981 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2982 40915120 : buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
2983 :
2984 40915120 : if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
2985 : buf_state))
2986 40914896 : break;
2987 : }
2988 :
2989 : /*
2990 : * If the buffer was not dirty already, do vacuum accounting.
2991 : */
2992 40914896 : if (!(old_buf_state & BM_DIRTY))
2993 : {
2994 1296608 : pgBufferUsage.shared_blks_dirtied++;
2995 1296608 : if (VacuumCostActive)
2996 14046 : VacuumCostBalance += VacuumCostPageDirty;
2997 : }
2998 : }
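                  : /*
                  :  * Editor's note: an illustrative sketch (not part of bufmgr.c) of the usual
                  :  * caller-side sequence around MarkBufferDirty() for a WAL-logged change, as
                  :  * described in access/transam/README; the page modification and WAL record
                  :  * are left abstract:
                  :  *
                  :  *     LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
                  :  *     START_CRIT_SECTION();
                  :  *     // ... modify BufferGetPage(buf) ...
                  :  *     MarkBufferDirty(buf);
                  :  *     // ... XLogInsert() the change, then PageSetLSN() the page ...
                  :  *     END_CRIT_SECTION();
                  :  *     LockBuffer(buf, BUFFER_LOCK_UNLOCK);
                  :  */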
2999 :
3000 : /*
3001 : * ReleaseAndReadBuffer -- combine ReleaseBuffer() and ReadBuffer()
3002 : *
3003 : * Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
3004 : * compared to calling the two routines separately. Now it's mainly just
3005 : * a convenience function. However, if the passed buffer is valid and
3006 : * already contains the desired block, we just return it as-is; and that
3007 : * does save considerable work compared to a full release and reacquire.
3008 : *
3009 : * Note: it is OK to pass buffer == InvalidBuffer, indicating that no old
3010 : * buffer actually needs to be released. This case is the same as ReadBuffer,
3011 : * but can save some tests in the caller.
3012 : */
3013 : Buffer
3014 56700584 : ReleaseAndReadBuffer(Buffer buffer,
3015 : Relation relation,
3016 : BlockNumber blockNum)
3017 : {
3018 56700584 : ForkNumber forkNum = MAIN_FORKNUM;
3019 : BufferDesc *bufHdr;
3020 :
3021 56700584 : if (BufferIsValid(buffer))
3022 : {
3023 : Assert(BufferIsPinned(buffer));
3024 34119932 : if (BufferIsLocal(buffer))
3025 : {
3026 73716 : bufHdr = GetLocalBufferDescriptor(-buffer - 1);
3027 80760 : if (bufHdr->tag.blockNum == blockNum &&
3028 14088 : BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
3029 7044 : BufTagGetForkNum(&bufHdr->tag) == forkNum)
3030 7044 : return buffer;
3031 66672 : UnpinLocalBuffer(buffer);
3032 : }
3033 : else
3034 : {
3035 34046216 : bufHdr = GetBufferDescriptor(buffer - 1);
3036 : /* we have pin, so it's ok to examine tag without spinlock */
3037 45847120 : if (bufHdr->tag.blockNum == blockNum &&
3038 23601808 : BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
3039 11800904 : BufTagGetForkNum(&bufHdr->tag) == forkNum)
3040 11800904 : return buffer;
3041 22245312 : UnpinBuffer(bufHdr);
3042 : }
3043 : }
3044 :
3045 44892636 : return ReadBuffer(relation, blockNum);
3046 : }
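                  : /*
                  :  * Editor's note: an illustrative sketch (not part of bufmgr.c) of stepping
                  :  * through consecutive blocks with ReleaseAndReadBuffer(), starting with no
                  :  * buffer at all; "rel" and "nblocks" are assumed caller context:
                  :  *
                  :  *     Buffer  buf = InvalidBuffer;
                  :  *
                  :  *     for (BlockNumber blkno = 0; blkno < nblocks; blkno++)
                  :  *     {
                  :  *         buf = ReleaseAndReadBuffer(buf, rel, blkno);
                  :  *         // ... examine the pinned page ...
                  :  *     }
                  :  *     if (BufferIsValid(buf))
                  :  *         ReleaseBuffer(buf);
                  :  */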
3047 :
3048 : /*
3049 : * PinBuffer -- make buffer unavailable for replacement.
3050 : *
3051 : * For the default access strategy, the buffer's usage_count is incremented
3052 : * when we first pin it; for other strategies we just make sure the usage_count
3053 : * isn't zero. (The idea of the latter is that we don't want synchronized
3054 : * heap scans to inflate the count, but we need it to not be zero to discourage
3055 : * other backends from stealing buffers from our ring. As long as we cycle
3056 : * through the ring faster than the global clock-sweep cycles, buffers in
3057 : * our ring won't be chosen as victims for replacement by other backends.)
3058 : *
3059 : * This should be applied only to shared buffers, never local ones.
3060 : *
3061 : * Since buffers are pinned/unpinned very frequently, pin buffers without
3062             : * taking the buffer header lock; instead update the state variable in a loop of
3063 : * CAS operations. Hopefully it's just a single CAS.
3064 : *
3065 : * Note that ResourceOwnerEnlarge() and ReservePrivateRefCountEntry()
3066 : * must have been done already.
3067 : *
3068 : * Returns true if buffer is BM_VALID, else false. This provision allows
3069 : * some callers to avoid an extra spinlock cycle.
3070 : */
3071 : static bool
3072 116110104 : PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
3073 : {
3074 116110104 : Buffer b = BufferDescriptorGetBuffer(buf);
3075 : bool result;
3076 : PrivateRefCountEntry *ref;
3077 :
3078 : Assert(!BufferIsLocal(b));
3079 : Assert(ReservedRefCountEntry != NULL);
3080 :
3081 116110104 : ref = GetPrivateRefCountEntry(b, true);
3082 :
3083 116110104 : if (ref == NULL)
3084 : {
3085 : uint32 buf_state;
3086 : uint32 old_buf_state;
3087 :
3088 111799952 : ref = NewPrivateRefCountEntry(b);
3089 :
3090 111799952 : old_buf_state = pg_atomic_read_u32(&buf->state);
3091 : for (;;)
3092 : {
3093 111831370 : if (old_buf_state & BM_LOCKED)
3094 1214 : old_buf_state = WaitBufHdrUnlocked(buf);
3095 :
3096 111831370 : buf_state = old_buf_state;
3097 :
3098 : /* increase refcount */
3099 111831370 : buf_state += BUF_REFCOUNT_ONE;
3100 :
3101 111831370 : if (strategy == NULL)
3102 : {
3103 : /* Default case: increase usagecount unless already max. */
3104 110678374 : if (BUF_STATE_GET_USAGECOUNT(buf_state) < BM_MAX_USAGE_COUNT)
3105 6437946 : buf_state += BUF_USAGECOUNT_ONE;
3106 : }
3107 : else
3108 : {
3109 : /*
3110             : * Ring buffers shouldn't evict others from the pool. Thus we
3111             : * don't let the usage count exceed 1.
3112 : */
3113 1152996 : if (BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
3114 63868 : buf_state += BUF_USAGECOUNT_ONE;
3115 : }
3116 :
3117 111831370 : if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
3118 : buf_state))
3119 : {
3120 111799952 : result = (buf_state & BM_VALID) != 0;
3121 :
3122 : /*
3123 : * Assume that we acquired a buffer pin for the purposes of
3124 : * Valgrind buffer client checks (even in !result case) to
3125 : * keep things simple. Buffers that are unsafe to access are
3126 : * not generally guaranteed to be marked undefined or
3127 : * non-accessible in any case.
3128 : */
3129 : VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(buf), BLCKSZ);
3130 111799952 : break;
3131 : }
3132 : }
3133 : }
3134 : else
3135 : {
3136 : /*
3137 : * If we previously pinned the buffer, it is likely to be valid, but
3138 : * it may not be if StartReadBuffers() was called and
3139 : * WaitReadBuffers() hasn't been called yet. We'll check by loading
3140 : * the flags without locking. This is racy, but it's OK to return
3141 : * false spuriously: when WaitReadBuffers() calls StartBufferIO(),
3142 : * it'll see that it's now valid.
3143 : *
3144 : * Note: We deliberately avoid a Valgrind client request here.
3145 : * Individual access methods can optionally superimpose buffer page
3146 : * client requests on top of our client requests to enforce that
3147 : * buffers are only accessed while locked (and pinned). It's possible
3148 : * that the buffer page is legitimately non-accessible here. We
3149 : * cannot meddle with that.
3150 : */
3151 4310152 : result = (pg_atomic_read_u32(&buf->state) & BM_VALID) != 0;
3152 : }
3153 :
3154 116110104 : ref->refcount++;
3155 : Assert(ref->refcount > 0);
3156 116110104 : ResourceOwnerRememberBuffer(CurrentResourceOwner, b);
3157 116110104 : return result;
3158 : }
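                 : /*
                 :  * Illustrative sketch (compiled out): the lock-free update pattern used
                 :  * above, reduced to its skeleton. On failure,
                 :  * pg_atomic_compare_exchange_u32() refreshes old_state with the current
                 :  * value, so the loop simply recomputes the desired state and retries.
                 :  */
                 : #if 0
                 : static void
                 : sketch_pin_without_header_lock(BufferDesc *buf)
                 : {
                 :     uint32      old_state = pg_atomic_read_u32(&buf->state);
                 :
                 :     for (;;)
                 :     {
                 :         uint32      new_state;
                 :
                 :         if (old_state & BM_LOCKED)
                 :             old_state = WaitBufHdrUnlocked(buf);    /* don't race the spinlock */
                 :
                 :         new_state = old_state + BUF_REFCOUNT_ONE;   /* the desired change */
                 :
                 :         if (pg_atomic_compare_exchange_u32(&buf->state, &old_state, new_state))
                 :             break;
                 :     }
                 : }
                 : #endif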
3159 :
3160 : /*
3161 : * PinBuffer_Locked -- as above, but caller already locked the buffer header.
3162 : * The spinlock is released before return.
3163 : *
3164 : * As this function is called with the spinlock held, the caller has to
3165 : * previously call ReservePrivateRefCountEntry() and
3166 : * ResourceOwnerEnlarge(CurrentResourceOwner);
3167 : *
3168 : * Currently, no callers of this function want to modify the buffer's
3169 : * usage_count at all, so there's no need for a strategy parameter.
3170 : * Also we don't bother with a BM_VALID test (the caller could check that for
3171 : * itself).
3172 : *
3173 : * Also all callers only ever use this function when it's known that the
3174 : * buffer can't have a preexisting pin by this backend. That allows us to skip
3175 : * searching the private refcount array & hash, which is a boon, because the
3176 : * spinlock is still held.
3177 : *
3178 : * Note: use of this routine is frequently mandatory, not just an optimization
3179 : * to save a spin lock/unlock cycle, because we need to pin a buffer before
3180 : * its state can change under us.
3181 : */
3182 : static void
3183 4352666 : PinBuffer_Locked(BufferDesc *buf)
3184 : {
3185 : Buffer b;
3186 : PrivateRefCountEntry *ref;
3187 : uint32 buf_state;
3188 :
3189 : /*
3190             : * As explained, we don't expect any preexisting pins. That allows us to
3191             : * manipulate the PrivateRefCount after releasing the spinlock.
3192 : */
3193 : Assert(GetPrivateRefCountEntry(BufferDescriptorGetBuffer(buf), false) == NULL);
3194 :
3195 : /*
3196 : * Buffer can't have a preexisting pin, so mark its page as defined to
3197 : * Valgrind (this is similar to the PinBuffer() case where the backend
3198 : * doesn't already have a buffer pin)
3199 : */
3200 : VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(buf), BLCKSZ);
3201 :
3202 : /*
3203 : * Since we hold the buffer spinlock, we can update the buffer state and
3204 : * release the lock in one operation.
3205 : */
3206 4352666 : buf_state = pg_atomic_read_u32(&buf->state);
3207 : Assert(buf_state & BM_LOCKED);
3208 4352666 : buf_state += BUF_REFCOUNT_ONE;
3209 4352666 : UnlockBufHdr(buf, buf_state);
3210 :
3211 4352666 : b = BufferDescriptorGetBuffer(buf);
3212 :
3213 4352666 : ref = NewPrivateRefCountEntry(b);
3214 4352666 : ref->refcount++;
3215 :
3216 4352666 : ResourceOwnerRememberBuffer(CurrentResourceOwner, b);
3217 4352666 : }
3218 :
3219 : /*
3220 : * Support for waking up another backend that is waiting for the cleanup lock
3221 : * to be released using BM_PIN_COUNT_WAITER.
3222 : *
3223 : * See LockBufferForCleanup().
3224 : *
3225 : * Expected to be called just after releasing a buffer pin (in a BufferDesc,
3226 : * not just reducing the backend-local pincount for the buffer).
3227 : */
3228 : static void
3229 148 : WakePinCountWaiter(BufferDesc *buf)
3230 : {
3231 : /*
3232             : * Acquire the buffer header lock and re-check that there's a waiter; another
3233             : * backend could have unpinned this buffer and already woken up the
3234             : * waiter.
3235 : *
3236 : * There's no danger of the buffer being replaced after we unpinned it
3237 : * above, as it's pinned by the waiter. The waiter removes
3238 : * BM_PIN_COUNT_WAITER if it stops waiting for a reason other than this
3239 : * backend waking it up.
3240 : */
3241 148 : uint32 buf_state = LockBufHdr(buf);
3242 :
3243 148 : if ((buf_state & BM_PIN_COUNT_WAITER) &&
3244 148 : BUF_STATE_GET_REFCOUNT(buf_state) == 1)
3245 144 : {
3246 : /* we just released the last pin other than the waiter's */
3247 144 : int wait_backend_pgprocno = buf->wait_backend_pgprocno;
3248 :
3249 144 : buf_state &= ~BM_PIN_COUNT_WAITER;
3250 144 : UnlockBufHdr(buf, buf_state);
3251 144 : ProcSendSignal(wait_backend_pgprocno);
3252 : }
3253 : else
3254 4 : UnlockBufHdr(buf, buf_state);
3255 148 : }
3256 :
3257 : /*
3258 : * UnpinBuffer -- make buffer available for replacement.
3259 : *
3260 : * This should be applied only to shared buffers, never local ones. This
3261 : * always adjusts CurrentResourceOwner.
3262 : */
3263 : static void
3264 142623446 : UnpinBuffer(BufferDesc *buf)
3265 : {
3266 142623446 : Buffer b = BufferDescriptorGetBuffer(buf);
3267 :
3268 142623446 : ResourceOwnerForgetBuffer(CurrentResourceOwner, b);
3269 142623446 : UnpinBufferNoOwner(buf);
3270 142623446 : }
3271 :
3272 : static void
3273 142632232 : UnpinBufferNoOwner(BufferDesc *buf)
3274 : {
3275 : PrivateRefCountEntry *ref;
3276 142632232 : Buffer b = BufferDescriptorGetBuffer(buf);
3277 :
3278 : Assert(!BufferIsLocal(b));
3279 :
3280 : /* not moving as we're likely deleting it soon anyway */
3281 142632232 : ref = GetPrivateRefCountEntry(b, false);
3282 : Assert(ref != NULL);
3283 : Assert(ref->refcount > 0);
3284 142632232 : ref->refcount--;
3285 142632232 : if (ref->refcount == 0)
3286 : {
3287 : uint32 buf_state;
3288 : uint32 old_buf_state;
3289 :
3290 : /*
3291 : * Mark buffer non-accessible to Valgrind.
3292 : *
3293 : * Note that the buffer may have already been marked non-accessible
3294 : * within access method code that enforces that buffers are only
3295 : * accessed while a buffer lock is held.
3296 : */
3297 : VALGRIND_MAKE_MEM_NOACCESS(BufHdrGetBlock(buf), BLCKSZ);
3298 :
3299 : /* I'd better not still hold the buffer content lock */
3300 : Assert(!LWLockHeldByMe(BufferDescriptorGetContentLock(buf)));
3301 :
3302 : /*
3303 : * Decrement the shared reference count.
3304 : *
3305             : * Since the buffer spinlock holder can update the state with a plain
3306             : * write, it's not safe to use an atomic decrement here; use a CAS loop instead.
3307 : */
3308 116152618 : old_buf_state = pg_atomic_read_u32(&buf->state);
3309 : for (;;)
3310 : {
3311 116185700 : if (old_buf_state & BM_LOCKED)
3312 1252 : old_buf_state = WaitBufHdrUnlocked(buf);
3313 :
3314 116185700 : buf_state = old_buf_state;
3315 :
3316 116185700 : buf_state -= BUF_REFCOUNT_ONE;
3317 :
3318 116185700 : if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
3319 : buf_state))
3320 116152618 : break;
3321 : }
3322 :
3323 : /* Support LockBufferForCleanup() */
3324 116152618 : if (buf_state & BM_PIN_COUNT_WAITER)
3325 148 : WakePinCountWaiter(buf);
3326 :
3327 116152618 : ForgetPrivateRefCountEntry(ref);
3328 : }
3329 142632232 : }
3330 :
3331 : #define ST_SORT sort_checkpoint_bufferids
3332 : #define ST_ELEMENT_TYPE CkptSortItem
3333 : #define ST_COMPARE(a, b) ckpt_buforder_comparator(a, b)
3334 : #define ST_SCOPE static
3335 : #define ST_DEFINE
3336 : #include "lib/sort_template.h"
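                 : /*
                 :  * Note (illustrative): with the parameters above, sort_template.h is
                 :  * expected to emit a specialized in-place sort roughly equivalent to
                 :  *
                 :  *     static void sort_checkpoint_bufferids(CkptSortItem *begin, size_t n);
                 :  *
                 :  * using ckpt_buforder_comparator() for comparisons; see lib/sort_template.h
                 :  * for the exact generated interface.
                 :  */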
3337 :
3338 : /*
3339 : * BufferSync -- Write out all dirty buffers in the pool.
3340 : *
3341 : * This is called at checkpoint time to write out all dirty shared buffers.
3342 : * The checkpoint request flags should be passed in. If CHECKPOINT_IMMEDIATE
3343 : * is set, we disable delays between writes; if CHECKPOINT_IS_SHUTDOWN,
3344 : * CHECKPOINT_END_OF_RECOVERY or CHECKPOINT_FLUSH_ALL is set, we write even
3345 : * unlogged buffers, which are otherwise skipped. The remaining flags
3346 : * currently have no effect here.
3347 : */
3348 : static void
3349 3362 : BufferSync(int flags)
3350 : {
3351 : uint32 buf_state;
3352 : int buf_id;
3353 : int num_to_scan;
3354 : int num_spaces;
3355 : int num_processed;
3356 : int num_written;
3357 3362 : CkptTsStatus *per_ts_stat = NULL;
3358 : Oid last_tsid;
3359 : binaryheap *ts_heap;
3360 : int i;
3361 3362 : int mask = BM_DIRTY;
3362 : WritebackContext wb_context;
3363 :
3364 : /*
3365 : * Unless this is a shutdown checkpoint or we have been explicitly told,
3366 : * we write only permanent, dirty buffers. But at shutdown or end of
3367 : * recovery, we write all dirty buffers.
3368 : */
3369 3362 : if (!((flags & (CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_END_OF_RECOVERY |
3370 : CHECKPOINT_FLUSH_ALL))))
3371 1926 : mask |= BM_PERMANENT;
3372 :
3373 : /*
3374 : * Loop over all buffers, and mark the ones that need to be written with
3375 : * BM_CHECKPOINT_NEEDED. Count them as we go (num_to_scan), so that we
3376 : * can estimate how much work needs to be done.
3377 : *
3378 : * This allows us to write only those pages that were dirty when the
3379 : * checkpoint began, and not those that get dirtied while it proceeds.
3380 : * Whenever a page with BM_CHECKPOINT_NEEDED is written out, either by us
3381 : * later in this function, or by normal backends or the bgwriter cleaning
3382 : * scan, the flag is cleared. Any buffer dirtied after this point won't
3383 : * have the flag set.
3384 : *
3385 : * Note that if we fail to write some buffer, we may leave buffers with
3386 : * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
3387 : * certainly need to be written for the next checkpoint attempt, too.
3388 : */
3389 3362 : num_to_scan = 0;
3390 24101826 : for (buf_id = 0; buf_id < NBuffers; buf_id++)
3391 : {
3392 24098464 : BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
3393 :
3394 : /*
3395 : * Header spinlock is enough to examine BM_DIRTY, see comment in
3396 : * SyncOneBuffer.
3397 : */
3398 24098464 : buf_state = LockBufHdr(bufHdr);
3399 :
3400 24098464 : if ((buf_state & mask) == mask)
3401 : {
3402 : CkptSortItem *item;
3403 :
3404 580032 : buf_state |= BM_CHECKPOINT_NEEDED;
3405 :
3406 580032 : item = &CkptBufferIds[num_to_scan++];
3407 580032 : item->buf_id = buf_id;
3408 580032 : item->tsId = bufHdr->tag.spcOid;
3409 580032 : item->relNumber = BufTagGetRelNumber(&bufHdr->tag);
3410 580032 : item->forkNum = BufTagGetForkNum(&bufHdr->tag);
3411 580032 : item->blockNum = bufHdr->tag.blockNum;
3412 : }
3413 :
3414 24098464 : UnlockBufHdr(bufHdr, buf_state);
3415 :
3416 : /* Check for barrier events in case NBuffers is large. */
3417 24098464 : if (ProcSignalBarrierPending)
3418 0 : ProcessProcSignalBarrier();
3419 : }
3420 :
3421 3362 : if (num_to_scan == 0)
3422 1286 : return; /* nothing to do */
3423 :
3424 2076 : WritebackContextInit(&wb_context, &checkpoint_flush_after);
3425 :
3426 : TRACE_POSTGRESQL_BUFFER_SYNC_START(NBuffers, num_to_scan);
3427 :
3428 : /*
3429 : * Sort buffers that need to be written to reduce the likelihood of random
3430 : * IO. The sorting is also important for the implementation of balancing
3431 : * writes between tablespaces. Without balancing writes we'd potentially
3432 : * end up writing to the tablespaces one-by-one; possibly overloading the
3433             : * end up writing to the tablespaces one-by-one, possibly overloading the
3434 : */
3435 2076 : sort_checkpoint_bufferids(CkptBufferIds, num_to_scan);
3436 :
3437 2076 : num_spaces = 0;
3438 :
3439 : /*
3440 : * Allocate progress status for each tablespace with buffers that need to
3441 : * be flushed. This requires the to-be-flushed array to be sorted.
3442 : */
3443 2076 : last_tsid = InvalidOid;
3444 582108 : for (i = 0; i < num_to_scan; i++)
3445 : {
3446 : CkptTsStatus *s;
3447 : Oid cur_tsid;
3448 :
3449 580032 : cur_tsid = CkptBufferIds[i].tsId;
3450 :
3451 : /*
3452             : * Grow the array of per-tablespace status structs every time a new
3453             : * tablespace is found.
3454 : */
3455 580032 : if (last_tsid == InvalidOid || last_tsid != cur_tsid)
3456 3102 : {
3457 : Size sz;
3458 :
3459 3102 : num_spaces++;
3460 :
3461 : /*
3462 : * Not worth adding grow-by-power-of-2 logic here - even with a
3463 : * few hundred tablespaces this should be fine.
3464 : */
3465 3102 : sz = sizeof(CkptTsStatus) * num_spaces;
3466 :
3467 3102 : if (per_ts_stat == NULL)
3468 2076 : per_ts_stat = (CkptTsStatus *) palloc(sz);
3469 : else
3470 1026 : per_ts_stat = (CkptTsStatus *) repalloc(per_ts_stat, sz);
3471 :
3472 3102 : s = &per_ts_stat[num_spaces - 1];
3473 3102 : memset(s, 0, sizeof(*s));
3474 3102 : s->tsId = cur_tsid;
3475 :
3476 : /*
3477 : * The first buffer in this tablespace. As CkptBufferIds is sorted
3478             : * by tablespace, all (s->num_to_scan) buffers in this tablespace
3479 : * will follow afterwards.
3480 : */
3481 3102 : s->index = i;
3482 :
3483 : /*
3484 : * progress_slice will be determined once we know how many buffers
3485 : * are in each tablespace, i.e. after this loop.
3486 : */
3487 :
3488 3102 : last_tsid = cur_tsid;
3489 : }
3490 : else
3491 : {
3492 576930 : s = &per_ts_stat[num_spaces - 1];
3493 : }
3494 :
3495 580032 : s->num_to_scan++;
3496 :
3497 : /* Check for barrier events. */
3498 580032 : if (ProcSignalBarrierPending)
3499 0 : ProcessProcSignalBarrier();
3500 : }
3501 :
3502 : Assert(num_spaces > 0);
3503 :
3504 : /*
3505 : * Build a min-heap over the write-progress in the individual tablespaces,
3506 : * and compute how large a portion of the total progress a single
3507 : * processed buffer is.
3508 : */
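                 : /*
                 :  * Worked example (illustrative numbers): if num_to_scan is 1000, split
                 :  * 900/100 across two tablespaces, their progress_slice values are
                 :  * 1000/900 ~= 1.11 and 1000/100 = 10. Each tablespace therefore reaches
                 :  * a progress of 1000 exactly when its last buffer is written, and always
                 :  * picking the minimum-progress tablespace interleaves the writes roughly
                 :  * 9:1 rather than draining one tablespace first.
                 :  */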
3509 2076 : ts_heap = binaryheap_allocate(num_spaces,
3510 : ts_ckpt_progress_comparator,
3511 : NULL);
3512 :
3513 5178 : for (i = 0; i < num_spaces; i++)
3514 : {
3515 3102 : CkptTsStatus *ts_stat = &per_ts_stat[i];
3516 :
3517 3102 : ts_stat->progress_slice = (float8) num_to_scan / ts_stat->num_to_scan;
3518 :
3519 3102 : binaryheap_add_unordered(ts_heap, PointerGetDatum(ts_stat));
3520 : }
3521 :
3522 2076 : binaryheap_build(ts_heap);
3523 :
3524 : /*
3525 : * Iterate through to-be-checkpointed buffers and write the ones (still)
3526 : * marked with BM_CHECKPOINT_NEEDED. The writes are balanced between
3527 : * tablespaces; otherwise the sorting would lead to only one tablespace
3528 : * receiving writes at a time, making inefficient use of the hardware.
3529 : */
3530 2076 : num_processed = 0;
3531 2076 : num_written = 0;
3532 582108 : while (!binaryheap_empty(ts_heap))
3533 : {
3534 580032 : BufferDesc *bufHdr = NULL;
3535 : CkptTsStatus *ts_stat = (CkptTsStatus *)
3536 580032 : DatumGetPointer(binaryheap_first(ts_heap));
3537 :
3538 580032 : buf_id = CkptBufferIds[ts_stat->index].buf_id;
3539 : Assert(buf_id != -1);
3540 :
3541 580032 : bufHdr = GetBufferDescriptor(buf_id);
3542 :
3543 580032 : num_processed++;
3544 :
3545 : /*
3546 : * We don't need to acquire the lock here, because we're only looking
3547 : * at a single bit. It's possible that someone else writes the buffer
3548 : * and clears the flag right after we check, but that doesn't matter
3549 : * since SyncOneBuffer will then do nothing. However, there is a
3550 : * further race condition: it's conceivable that between the time we
3551 : * examine the bit here and the time SyncOneBuffer acquires the lock,
3552 : * someone else not only wrote the buffer but replaced it with another
3553 : * page and dirtied it. In that improbable case, SyncOneBuffer will
3554 : * write the buffer though we didn't need to. It doesn't seem worth
3555 : * guarding against this, though.
3556 : */
3557 580032 : if (pg_atomic_read_u32(&bufHdr->state) & BM_CHECKPOINT_NEEDED)
3558 : {
3559 541230 : if (SyncOneBuffer(buf_id, false, &wb_context) & BUF_WRITTEN)
3560 : {
3561 : TRACE_POSTGRESQL_BUFFER_SYNC_WRITTEN(buf_id);
3562 541230 : PendingCheckpointerStats.buffers_written++;
3563 541230 : num_written++;
3564 : }
3565 : }
3566 :
3567 : /*
3568             : * Measure progress independently of whether we actually had to flush the
3569             : * buffer - otherwise the writes would become unbalanced.
3570 : */
3571 580032 : ts_stat->progress += ts_stat->progress_slice;
3572 580032 : ts_stat->num_scanned++;
3573 580032 : ts_stat->index++;
3574 :
3575 : /* Have all the buffers from the tablespace been processed? */
3576 580032 : if (ts_stat->num_scanned == ts_stat->num_to_scan)
3577 : {
3578 3102 : binaryheap_remove_first(ts_heap);
3579 : }
3580 : else
3581 : {
3582 : /* update heap with the new progress */
3583 576930 : binaryheap_replace_first(ts_heap, PointerGetDatum(ts_stat));
3584 : }
3585 :
3586 : /*
3587 : * Sleep to throttle our I/O rate.
3588 : *
3589 : * (This will check for barrier events even if it doesn't sleep.)
3590 : */
3591 580032 : CheckpointWriteDelay(flags, (double) num_processed / num_to_scan);
3592 : }
3593 :
3594 : /*
3595 : * Issue all pending flushes. Only checkpointer calls BufferSync(), so
3596 : * IOContext will always be IOCONTEXT_NORMAL.
3597 : */
3598 2076 : IssuePendingWritebacks(&wb_context, IOCONTEXT_NORMAL);
3599 :
3600 2076 : pfree(per_ts_stat);
3601 2076 : per_ts_stat = NULL;
3602 2076 : binaryheap_free(ts_heap);
3603 :
3604 : /*
3605 : * Update checkpoint statistics. As noted above, this doesn't include
3606 : * buffers written by other backends or bgwriter scan.
3607 : */
3608 2076 : CheckpointStats.ckpt_bufs_written += num_written;
3609 :
3610 : TRACE_POSTGRESQL_BUFFER_SYNC_DONE(NBuffers, num_written, num_to_scan);
3611 : }
3612 :
3613 : /*
3614 : * BgBufferSync -- Write out some dirty buffers in the pool.
3615 : *
3616 : * This is called periodically by the background writer process.
3617 : *
3618 : * Returns true if it's appropriate for the bgwriter process to go into
3619 : * low-power hibernation mode. (This happens if the strategy clock sweep
3620 : * has been "lapped" and no buffer allocations have occurred recently,
3621 : * or if the bgwriter has been effectively disabled by setting
3622 : * bgwriter_lru_maxpages to 0.)
3623 : */
3624 : bool
3625 24432 : BgBufferSync(WritebackContext *wb_context)
3626 : {
3627 : /* info obtained from freelist.c */
3628 : int strategy_buf_id;
3629 : uint32 strategy_passes;
3630 : uint32 recent_alloc;
3631 :
3632 : /*
3633 : * Information saved between calls so we can determine the strategy
3634 : * point's advance rate and avoid scanning already-cleaned buffers.
3635 : */
3636 : static bool saved_info_valid = false;
3637 : static int prev_strategy_buf_id;
3638 : static uint32 prev_strategy_passes;
3639 : static int next_to_clean;
3640 : static uint32 next_passes;
3641 :
3642 : /* Moving averages of allocation rate and clean-buffer density */
3643 : static float smoothed_alloc = 0;
3644 : static float smoothed_density = 10.0;
3645 :
3646 : /* Potentially these could be tunables, but for now, not */
3647 24432 : float smoothing_samples = 16;
3648 24432 : float scan_whole_pool_milliseconds = 120000.0;
3649 :
3650 : /* Used to compute how far we scan ahead */
3651 : long strategy_delta;
3652 : int bufs_to_lap;
3653 : int bufs_ahead;
3654 : float scans_per_alloc;
3655 : int reusable_buffers_est;
3656 : int upcoming_alloc_est;
3657 : int min_scan_buffers;
3658 :
3659 : /* Variables for the scanning loop proper */
3660 : int num_to_scan;
3661 : int num_written;
3662 : int reusable_buffers;
3663 :
3664 : /* Variables for final smoothed_density update */
3665 : long new_strategy_delta;
3666 : uint32 new_recent_alloc;
3667 :
3668 : /*
3669 : * Find out where the freelist clock sweep currently is, and how many
3670 : * buffer allocations have happened since our last call.
3671 : */
3672 24432 : strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
3673 :
3674 : /* Report buffer alloc counts to pgstat */
3675 24432 : PendingBgWriterStats.buf_alloc += recent_alloc;
3676 :
3677 : /*
3678 : * If we're not running the LRU scan, just stop after doing the stats
3679 : * stuff. We mark the saved state invalid so that we can recover sanely
3680 : * if LRU scan is turned back on later.
3681 : */
3682 24432 : if (bgwriter_lru_maxpages <= 0)
3683 : {
3684 66 : saved_info_valid = false;
3685 66 : return true;
3686 : }
3687 :
3688 : /*
3689 : * Compute strategy_delta = how many buffers have been scanned by the
3690 : * clock sweep since last time. If first time through, assume none. Then
3691 : * see if we are still ahead of the clock sweep, and if so, how many
3692 : * buffers we could scan before we'd catch up with it and "lap" it. Note:
3693             : * weird-looking coding of the xxx_passes comparisons is to avoid bogus
3694             : * behavior when the pass counts wrap around.
3695 : */
3696 24366 : if (saved_info_valid)
3697 : {
3698 23354 : int32 passes_delta = strategy_passes - prev_strategy_passes;
3699 :
3700 23354 : strategy_delta = strategy_buf_id - prev_strategy_buf_id;
3701 23354 : strategy_delta += (long) passes_delta * NBuffers;
3702 :
3703 : Assert(strategy_delta >= 0);
3704 :
3705 23354 : if ((int32) (next_passes - strategy_passes) > 0)
3706 : {
3707 : /* we're one pass ahead of the strategy point */
3708 4858 : bufs_to_lap = strategy_buf_id - next_to_clean;
3709 : #ifdef BGW_DEBUG
3710 : elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
3711 : next_passes, next_to_clean,
3712 : strategy_passes, strategy_buf_id,
3713 : strategy_delta, bufs_to_lap);
3714 : #endif
3715 : }
3716 18496 : else if (next_passes == strategy_passes &&
3717 14716 : next_to_clean >= strategy_buf_id)
3718 : {
3719 : /* on same pass, but ahead or at least not behind */
3720 14364 : bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
3721 : #ifdef BGW_DEBUG
3722 : elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
3723 : next_passes, next_to_clean,
3724 : strategy_passes, strategy_buf_id,
3725 : strategy_delta, bufs_to_lap);
3726 : #endif
3727 : }
3728 : else
3729 : {
3730 : /*
3731 : * We're behind, so skip forward to the strategy point and start
3732 : * cleaning from there.
3733 : */
3734 : #ifdef BGW_DEBUG
3735 : elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
3736 : next_passes, next_to_clean,
3737 : strategy_passes, strategy_buf_id,
3738 : strategy_delta);
3739 : #endif
3740 4132 : next_to_clean = strategy_buf_id;
3741 4132 : next_passes = strategy_passes;
3742 4132 : bufs_to_lap = NBuffers;
3743 : }
3744 : }
3745 : else
3746 : {
3747 : /*
3748 : * Initializing at startup or after LRU scanning had been off. Always
3749 : * start at the strategy point.
3750 : */
3751 : #ifdef BGW_DEBUG
3752 : elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
3753 : strategy_passes, strategy_buf_id);
3754 : #endif
3755 1012 : strategy_delta = 0;
3756 1012 : next_to_clean = strategy_buf_id;
3757 1012 : next_passes = strategy_passes;
3758 1012 : bufs_to_lap = NBuffers;
3759 : }
3760 :
3761 : /* Update saved info for next time */
3762 24366 : prev_strategy_buf_id = strategy_buf_id;
3763 24366 : prev_strategy_passes = strategy_passes;
3764 24366 : saved_info_valid = true;
3765 :
3766 : /*
3767             : * Compute how many buffers had to be scanned for each new allocation, i.e.,
3768 : * 1/density of reusable buffers, and track a moving average of that.
3769 : *
3770             : * If the strategy point didn't move, we don't update the density estimate.
3771 : */
3772 24366 : if (strategy_delta > 0 && recent_alloc > 0)
3773 : {
3774 5282 : scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
3775 5282 : smoothed_density += (scans_per_alloc - smoothed_density) /
3776 : smoothing_samples;
3777 : }
3778 :
3779 : /*
3780 : * Estimate how many reusable buffers there are between the current
3781 : * strategy point and where we've scanned ahead to, based on the smoothed
3782 : * density estimate.
3783 : */
3784 24366 : bufs_ahead = NBuffers - bufs_to_lap;
3785 24366 : reusable_buffers_est = (float) bufs_ahead / smoothed_density;
3786 :
3787 : /*
3788 : * Track a moving average of recent buffer allocations. Here, rather than
3789 : * a true average we want a fast-attack, slow-decline behavior: we
3790 : * immediately follow any increase.
3791 : */
3792 24366 : if (smoothed_alloc <= (float) recent_alloc)
3793 7294 : smoothed_alloc = recent_alloc;
3794 : else
3795 17072 : smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
3796 : smoothing_samples;
3797 :
3798 : /* Scale the estimate by a GUC to allow more aggressive tuning. */
3799 24366 : upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
3800 :
3801 : /*
3802 : * If recent_alloc remains at zero for many cycles, smoothed_alloc will
3803 : * eventually underflow to zero, and the underflows produce annoying
3804 : * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
3805 : * zero, there's no point in tracking smaller and smaller values of
3806 : * smoothed_alloc, so just reset it to exactly zero to avoid this
3807 : * syndrome. It will pop back up as soon as recent_alloc increases.
3808 : */
3809 24366 : if (upcoming_alloc_est == 0)
3810 4780 : smoothed_alloc = 0;
3811 :
3812 : /*
3813 : * Even in cases where there's been little or no buffer allocation
3814 : * activity, we want to make a small amount of progress through the buffer
3815 : * cache so that as many reusable buffers as possible are clean after an
3816 : * idle period.
3817 : *
3818 : * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
3819 : * the BGW will be called during the scan_whole_pool time; slice the
3820 : * buffer pool into that many sections.
3821 : */
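                 : /*
                 :  * Worked example (illustrative, assuming the default bgwriter_delay of
                 :  * 200ms): 120000ms / 200ms = 600 bgwriter rounds per full sweep, so with
                 :  * NBuffers = 16384 (128MB of shared_buffers) the minimum scan is
                 :  * 16384 / 600 ~= 27 buffers per round.
                 :  */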
3822 24366 : min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
3823 :
3824 24366 : if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
3825 : {
3826 : #ifdef BGW_DEBUG
3827 : elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
3828 : upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
3829 : #endif
3830 12164 : upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
3831 : }
3832 :
3833 : /*
3834 : * Now write out dirty reusable buffers, working forward from the
3835 : * next_to_clean point, until we have lapped the strategy scan, or cleaned
3836 : * enough buffers to match our estimate of the next cycle's allocation
3837 : * requirements, or hit the bgwriter_lru_maxpages limit.
3838 : */
3839 :
3840 24366 : num_to_scan = bufs_to_lap;
3841 24366 : num_written = 0;
3842 24366 : reusable_buffers = reusable_buffers_est;
3843 :
3844 : /* Execute the LRU scan */
3845 3411378 : while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
3846 : {
3847 3387024 : int sync_state = SyncOneBuffer(next_to_clean, true,
3848 : wb_context);
3849 :
3850 3387024 : if (++next_to_clean >= NBuffers)
3851 : {
3852 4698 : next_to_clean = 0;
3853 4698 : next_passes++;
3854 : }
3855 3387024 : num_to_scan--;
3856 :
3857 3387024 : if (sync_state & BUF_WRITTEN)
3858 : {
3859 38074 : reusable_buffers++;
3860 38074 : if (++num_written >= bgwriter_lru_maxpages)
3861 : {
3862 12 : PendingBgWriterStats.maxwritten_clean++;
3863 12 : break;
3864 : }
3865 : }
3866 3348950 : else if (sync_state & BUF_REUSABLE)
3867 2482072 : reusable_buffers++;
3868 : }
3869 :
3870 24366 : PendingBgWriterStats.buf_written_clean += num_written;
3871 :
3872 : #ifdef BGW_DEBUG
3873 : elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
3874 : recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
3875 : smoothed_density, reusable_buffers_est, upcoming_alloc_est,
3876 : bufs_to_lap - num_to_scan,
3877 : num_written,
3878 : reusable_buffers - reusable_buffers_est);
3879 : #endif
3880 :
3881 : /*
3882 : * Consider the above scan as being like a new allocation scan.
3883 : * Characterize its density and update the smoothed one based on it. This
3884 : * effectively halves the moving average period in cases where both the
3885 : * strategy and the background writer are doing some useful scanning,
3886             : * which is helpful because a long memory isn't desirable for the
3887             : * density estimates.
3888 : */
3889 24366 : new_strategy_delta = bufs_to_lap - num_to_scan;
3890 24366 : new_recent_alloc = reusable_buffers - reusable_buffers_est;
3891 24366 : if (new_strategy_delta > 0 && new_recent_alloc > 0)
3892 : {
3893 18008 : scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
3894 18008 : smoothed_density += (scans_per_alloc - smoothed_density) /
3895 : smoothing_samples;
3896 :
3897 : #ifdef BGW_DEBUG
3898 : elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
3899 : new_recent_alloc, new_strategy_delta,
3900 : scans_per_alloc, smoothed_density);
3901 : #endif
3902 : }
3903 :
3904 : /* Return true if OK to hibernate */
3905 24366 : return (bufs_to_lap == 0 && recent_alloc == 0);
3906 : }
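                 : /*
                 :  * Illustrative sketch (compiled out): the smoothing used above is a plain
                 :  * exponential moving average with alpha = 1/smoothing_samples, i.e. each
                 :  * new sample pulls the estimate 1/16th of the way toward itself.
                 :  */
                 : #if 0
                 : static float
                 : sketch_smooth(float smoothed, float sample, float smoothing_samples)
                 : {
                 :     return smoothed + (sample - smoothed) / smoothing_samples;
                 : }
                 : #endif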
3907 :
3908 : /*
3909 : * SyncOneBuffer -- process a single buffer during syncing.
3910 : *
3911 : * If skip_recently_used is true, we don't write currently-pinned buffers, nor
3912 : * buffers marked recently used, as these are not replacement candidates.
3913 : *
3914 : * Returns a bitmask containing the following flag bits:
3915 : * BUF_WRITTEN: we wrote the buffer.
3916 : * BUF_REUSABLE: buffer is available for replacement, ie, it has
3917 : * pin count 0 and usage count 0.
3918 : *
3919 : * (BUF_WRITTEN could be set in error if FlushBuffer finds the buffer clean
3920 : * after locking it, but we don't care all that much.)
3921 : */
3922 : static int
3923 3928254 : SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
3924 : {
3925 3928254 : BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
3926 3928254 : int result = 0;
3927 : uint32 buf_state;
3928 : BufferTag tag;
3929 :
3930 : /* Make sure we can handle the pin */
3931 3928254 : ReservePrivateRefCountEntry();
3932 3928254 : ResourceOwnerEnlarge(CurrentResourceOwner);
3933 :
3934 : /*
3935 : * Check whether buffer needs writing.
3936 : *
3937 : * We can make this check without taking the buffer content lock so long
3938 : * as we mark pages dirty in access methods *before* logging changes with
3939 : * XLogInsert(): if someone marks the buffer dirty just after our check we
3940             : * don't worry, because the checkpoint's redo pointer precedes the log record
3941             : * for the upcoming changes, so we are not required to write such a dirty buffer.
3942 : */
3943 3928254 : buf_state = LockBufHdr(bufHdr);
3944 :
3945 3928254 : if (BUF_STATE_GET_REFCOUNT(buf_state) == 0 &&
3946 3920428 : BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
3947 : {
3948 2523478 : result |= BUF_REUSABLE;
3949 : }
3950 1404776 : else if (skip_recently_used)
3951 : {
3952 : /* Caller told us not to write recently-used buffers */
3953 866878 : UnlockBufHdr(bufHdr, buf_state);
3954 866878 : return result;
3955 : }
3956 :
3957 3061376 : if (!(buf_state & BM_VALID) || !(buf_state & BM_DIRTY))
3958 : {
3959 : /* It's clean, so nothing to do */
3960 2482072 : UnlockBufHdr(bufHdr, buf_state);
3961 2482072 : return result;
3962 : }
3963 :
3964 : /*
3965 : * Pin it, share-lock it, write it. (FlushBuffer will do nothing if the
3966 : * buffer is clean by the time we've locked it.)
3967 : */
3968 579304 : PinBuffer_Locked(bufHdr);
3969 579304 : LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3970 :
3971 579304 : FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
3972 :
3973 579304 : LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3974 :
3975 579304 : tag = bufHdr->tag;
3976 :
3977 579304 : UnpinBuffer(bufHdr);
3978 :
3979 : /*
3980 : * SyncOneBuffer() is only called by checkpointer and bgwriter, so
3981 : * IOContext will always be IOCONTEXT_NORMAL.
3982 : */
3983 579304 : ScheduleBufferTagForWriteback(wb_context, IOCONTEXT_NORMAL, &tag);
3984 :
3985 579304 : return result | BUF_WRITTEN;
3986 : }
3987 :
3988 : /*
3989 : * AtEOXact_Buffers - clean up at end of transaction.
3990 : *
3991 : * As of PostgreSQL 8.0, buffer pins should get released by the
3992 : * ResourceOwner mechanism. This routine is just a debugging
3993 : * cross-check that no pins remain.
3994 : */
3995 : void
3996 930662 : AtEOXact_Buffers(bool isCommit)
3997 : {
3998 930662 : CheckForBufferLeaks();
3999 :
4000 930662 : AtEOXact_LocalBuffers(isCommit);
4001 :
4002 : Assert(PrivateRefCountOverflowed == 0);
4003 930662 : }
4004 :
4005 : /*
4006 : * Initialize access to shared buffer pool
4007 : *
4008 : * This is called during backend startup (whether standalone or under the
4009 : * postmaster). It sets up for this backend's access to the already-existing
4010 : * buffer pool.
4011 : */
4012 : void
4013 41582 : InitBufferManagerAccess(void)
4014 : {
4015 : HASHCTL hash_ctl;
4016 :
4017 : /*
4018 : * An advisory limit on the number of pins each backend should hold, based
4019 : * on shared_buffers and the maximum number of connections possible.
4020 : * That's very pessimistic, but outside toy-sized shared_buffers it should
4021 : * allow plenty of pins. LimitAdditionalPins() and
4022 : * GetAdditionalPinLimit() can be used to check the remaining balance.
4023 : */
4024 41582 : MaxProportionalPins = NBuffers / (MaxBackends + NUM_AUXILIARY_PROCS);
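                 : /*
                 :  * Worked example (illustrative numbers): with NBuffers = 16384 (128MB of
                 :  * shared_buffers) and, say, 100 backend plus auxiliary-process slots,
                 :  * each backend's advisory share is 16384 / 100 = 163 pins.
                 :  */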
4025 :
4026 41582 : memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
4027 :
4028 41582 : hash_ctl.keysize = sizeof(int32);
4029 41582 : hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
4030 :
4031 41582 : PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
4032 : HASH_ELEM | HASH_BLOBS);
4033 :
4034 : /*
4035             : * AtProcExit_Buffers needs LWLock access, and therefore has to be called at
4036 : * the corresponding phase of backend shutdown.
4037 : */
4038 : Assert(MyProc != NULL);
4039 41582 : on_shmem_exit(AtProcExit_Buffers, 0);
4040 41582 : }
4041 :
4042 : /*
4043 : * During backend exit, ensure that we released all shared-buffer locks and
4044 : * assert that we have no remaining pins.
4045 : */
4046 : static void
4047 41582 : AtProcExit_Buffers(int code, Datum arg)
4048 : {
4049 41582 : UnlockBuffers();
4050 :
4051 41582 : CheckForBufferLeaks();
4052 :
4053 : /* localbuf.c needs a chance too */
4054 41582 : AtProcExit_LocalBuffers();
4055 41582 : }
4056 :
4057 : /*
4058 : * CheckForBufferLeaks - ensure this backend holds no buffer pins
4059 : *
4060 : * As of PostgreSQL 8.0, buffer pins should get released by the
4061 : * ResourceOwner mechanism. This routine is just a debugging
4062 : * cross-check that no pins remain.
4063 : */
4064 : static void
4065 972244 : CheckForBufferLeaks(void)
4066 : {
4067 : #ifdef USE_ASSERT_CHECKING
4068 : int RefCountErrors = 0;
4069 : PrivateRefCountEntry *res;
4070 : int i;
4071 : char *s;
4072 :
4073 : /* check the array */
4074 : for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
4075 : {
4076 : res = &PrivateRefCountArray[i];
4077 :
4078 : if (res->buffer != InvalidBuffer)
4079 : {
4080 : s = DebugPrintBufferRefcount(res->buffer);
4081 : elog(WARNING, "buffer refcount leak: %s", s);
4082 : pfree(s);
4083 :
4084 : RefCountErrors++;
4085 : }
4086 : }
4087 :
4088 : /* if necessary search the hash */
4089 : if (PrivateRefCountOverflowed)
4090 : {
4091 : HASH_SEQ_STATUS hstat;
4092 :
4093 : hash_seq_init(&hstat, PrivateRefCountHash);
4094 : while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
4095 : {
4096 : s = DebugPrintBufferRefcount(res->buffer);
4097 : elog(WARNING, "buffer refcount leak: %s", s);
4098 : pfree(s);
4099 : RefCountErrors++;
4100 : }
4101 : }
4102 :
4103 : Assert(RefCountErrors == 0);
4104 : #endif
4105 972244 : }
4106 :
4107 : #ifdef USE_ASSERT_CHECKING
4108 : /*
4109 : * Check for exclusive-locked catalog buffers. This is the core of
4110 : * AssertCouldGetRelation().
4111 : *
4112 : * A backend would self-deadlock on LWLocks if the catalog scan read the
4113 : * exclusive-locked buffer. The main threat is exclusive-locked buffers of
4114 : * catalogs used in relcache, because a catcache search on any catalog may
4115 : * build that catalog's relcache entry. We don't have an inventory of
4116 : * catalogs relcache uses, so just check buffers of most catalogs.
4117 : *
4118 : * It's better to minimize waits while holding an exclusive buffer lock, so it
4119 : * would be nice to broaden this check not to be catalog-specific. However,
4120 : * bttextcmp() accesses pg_collation, and non-core opclasses might similarly
4121 : * read tables. That is deadlock-free as long as there's no loop in the
4122 : * dependency graph: modifying table A may cause an opclass to read table B,
4123 : * but it must not cause a read of table A.
4124 : */
4125 : void
4126 : AssertBufferLocksPermitCatalogRead(void)
4127 : {
4128 : ForEachLWLockHeldByMe(AssertNotCatalogBufferLock, NULL);
4129 : }
4130 :
4131 : static void
4132 : AssertNotCatalogBufferLock(LWLock *lock, LWLockMode mode,
4133 : void *unused_context)
4134 : {
4135 : BufferDesc *bufHdr;
4136 : BufferTag tag;
4137 : Oid relid;
4138 :
4139 : if (mode != LW_EXCLUSIVE)
4140 : return;
4141 :
4142 : if (!((BufferDescPadded *) lock > BufferDescriptors &&
4143 : (BufferDescPadded *) lock < BufferDescriptors + NBuffers))
4144 : return; /* not a buffer lock */
4145 :
4146 : bufHdr = (BufferDesc *)
4147 : ((char *) lock - offsetof(BufferDesc, content_lock));
4148 : tag = bufHdr->tag;
4149 :
4150 : /*
4151 : * This relNumber==relid assumption holds until a catalog experiences
4152 : * VACUUM FULL or similar. After a command like that, relNumber will be
4153 : * in the normal (non-catalog) range, and we lose the ability to detect
4154 : * hazardous access to that catalog. Calling RelidByRelfilenumber() would
4155 : * close that gap, but RelidByRelfilenumber() might then deadlock with a
4156 : * held lock.
4157 : */
4158 : relid = tag.relNumber;
4159 :
4160 : if (IsCatalogTextUniqueIndexOid(relid)) /* see comments at the callee */
4161 : return;
4162 :
4163 : Assert(!IsCatalogRelationOid(relid));
4164 : }
4165 : #endif
4166 :
4167 :
4168 : /*
4169 : * Helper routine to issue warnings when a buffer is unexpectedly pinned
4170 : */
4171 : char *
4172 80 : DebugPrintBufferRefcount(Buffer buffer)
4173 : {
4174 : BufferDesc *buf;
4175 : int32 loccount;
4176 : char *result;
4177 : ProcNumber backend;
4178 : uint32 buf_state;
4179 :
4180 : Assert(BufferIsValid(buffer));
4181 80 : if (BufferIsLocal(buffer))
4182 : {
4183 32 : buf = GetLocalBufferDescriptor(-buffer - 1);
4184 32 : loccount = LocalRefCount[-buffer - 1];
4185 32 : backend = MyProcNumber;
4186 : }
4187 : else
4188 : {
4189 48 : buf = GetBufferDescriptor(buffer - 1);
4190 48 : loccount = GetPrivateRefCount(buffer);
4191 48 : backend = INVALID_PROC_NUMBER;
4192 : }
4193 :
4194 : /* theoretically we should lock the bufhdr here */
4195 80 : buf_state = pg_atomic_read_u32(&buf->state);
4196 :
4197 80 : result = psprintf("[%03d] (rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
4198 : buffer,
4199 80 : relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
4200 : BufTagGetForkNum(&buf->tag)).str,
4201 : buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
4202 : BUF_STATE_GET_REFCOUNT(buf_state), loccount);
4203 80 : return result;
4204 : }
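                 : /*
                 :  * Illustration (values invented for the example): the format above yields
                 :  * warnings along the lines of
                 :  *
                 :  *     [123] (rel=base/16384/16385, blockNum=7, flags=0x86, refcount=1 1)
                 :  *
                 :  * i.e. buffer number, relation path, block number, raw flag bits, shared
                 :  * refcount and this backend's local pin count.
                 :  */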
4205 :
4206 : /*
4207 : * CheckPointBuffers
4208 : *
4209 : * Flush all dirty blocks in buffer pool to disk at checkpoint time.
4210 : *
4211 : * Note: temporary relations do not participate in checkpoints, so they don't
4212 : * need to be flushed.
4213 : */
4214 : void
4215 3362 : CheckPointBuffers(int flags)
4216 : {
4217 3362 : BufferSync(flags);
4218 3362 : }
4219 :
4220 : /*
4221 : * BufferGetBlockNumber
4222 : * Returns the block number associated with a buffer.
4223 : *
4224 : * Note:
4225 : * Assumes that the buffer is valid and pinned, else the
4226 : * value may be obsolete immediately...
4227 : */
4228 : BlockNumber
4229 98948220 : BufferGetBlockNumber(Buffer buffer)
4230 : {
4231 : BufferDesc *bufHdr;
4232 :
4233 : Assert(BufferIsPinned(buffer));
4234 :
4235 98948220 : if (BufferIsLocal(buffer))
4236 3798946 : bufHdr = GetLocalBufferDescriptor(-buffer - 1);
4237 : else
4238 95149274 : bufHdr = GetBufferDescriptor(buffer - 1);
4239 :
4240 : /* pinned, so OK to read tag without spinlock */
4241 98948220 : return bufHdr->tag.blockNum;
4242 : }
4243 :
4244 : /*
4245 : * BufferGetTag
4246 : * Returns the relfilelocator, fork number and block number associated with
4247 : * a buffer.
4248 : */
4249 : void
4250 30160562 : BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum,
4251 : BlockNumber *blknum)
4252 : {
4253 : BufferDesc *bufHdr;
4254 :
4255 : /* Do the same checks as BufferGetBlockNumber. */
4256 : Assert(BufferIsPinned(buffer));
4257 :
4258 30160562 : if (BufferIsLocal(buffer))
4259 0 : bufHdr = GetLocalBufferDescriptor(-buffer - 1);
4260 : else
4261 30160562 : bufHdr = GetBufferDescriptor(buffer - 1);
4262 :
4263 : /* pinned, so OK to read tag without spinlock */
4264 30160562 : *rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
4265 30160562 : *forknum = BufTagGetForkNum(&bufHdr->tag);
4266 30160562 : *blknum = bufHdr->tag.blockNum;
4267 30160562 : }
4268 :
4269 : /*
4270 : * FlushBuffer
4271 : * Physically write out a shared buffer.
4272 : *
4273 : * NOTE: this actually just passes the buffer contents to the kernel; the
4274 : * real write to disk won't happen until the kernel feels like it. This
4275 : * is okay from our point of view since we can redo the changes from WAL.
4276 : * However, we will need to force the changes to disk via fsync before
4277 : * we can checkpoint WAL.
4278 : *
4279 : * The caller must hold a pin on the buffer and have share-locked the
4280 : * buffer contents. (Note: a share-lock does not prevent updates of
4281 : * hint bits in the buffer, so the page could change while the write
4282 : * is in progress, but we assume that that will not invalidate the data
4283 : * written.)
4284 : *
4285 : * If the caller has an smgr reference for the buffer's relation, pass it
4286 : * as the second parameter. If not, pass NULL.
4287 : */
4288 : static void
4289 1102586 : FlushBuffer(BufferDesc *buf, SMgrRelation reln, IOObject io_object,
4290 : IOContext io_context)
4291 : {
4292 : XLogRecPtr recptr;
4293 : ErrorContextCallback errcallback;
4294 : instr_time io_start;
4295 : Block bufBlock;
4296 : char *bufToWrite;
4297 : uint32 buf_state;
4298 :
4299 : /*
4300 : * Try to start an I/O operation. If StartBufferIO returns false, then
4301 : * someone else flushed the buffer before we could, so we need not do
4302 : * anything.
4303 : */
4304 1102586 : if (!StartBufferIO(buf, false, false))
4305 20 : return;
4306 :
4307 : /* Setup error traceback support for ereport() */
4308 1102566 : errcallback.callback = shared_buffer_write_error_callback;
4309 1102566 : errcallback.arg = buf;
4310 1102566 : errcallback.previous = error_context_stack;
4311 1102566 : error_context_stack = &errcallback;
4312 :
4313 : /* Find smgr relation for buffer */
4314 1102566 : if (reln == NULL)
4315 1092194 : reln = smgropen(BufTagGetRelFileLocator(&buf->tag), INVALID_PROC_NUMBER);
4316 :
4317 : TRACE_POSTGRESQL_BUFFER_FLUSH_START(BufTagGetForkNum(&buf->tag),
4318 : buf->tag.blockNum,
4319 : reln->smgr_rlocator.locator.spcOid,
4320 : reln->smgr_rlocator.locator.dbOid,
4321 : reln->smgr_rlocator.locator.relNumber);
4322 :
4323 1102566 : buf_state = LockBufHdr(buf);
4324 :
4325 : /*
4326 : * Run PageGetLSN while holding header lock, since we don't have the
4327 : * buffer locked exclusively in all cases.
4328 : */
4329 1102566 : recptr = BufferGetLSN(buf);
4330 :
4331 : /* To check if block content changes while flushing. - vadim 01/17/97 */
4332 1102566 : buf_state &= ~BM_JUST_DIRTIED;
4333 1102566 : UnlockBufHdr(buf, buf_state);
4334 :
4335 : /*
4336 : * Force XLOG flush up to buffer's LSN. This implements the basic WAL
4337 : * rule that log updates must hit disk before any of the data-file changes
4338 : * they describe do.
4339 : *
4340 : * However, this rule does not apply to unlogged relations, which will be
4341 : * lost after a crash anyway. Most unlogged relation pages do not bear
4342 : * LSNs since we never emit WAL records for them, and therefore flushing
4343 : * up through the buffer LSN would be useless, but harmless. However,
4344 : * GiST indexes use LSNs internally to track page-splits, and therefore
4345 : * unlogged GiST pages bear "fake" LSNs generated by
4346 : * GetFakeLSNForUnloggedRel. It is unlikely but possible that the fake
4347 : * LSN counter could advance past the WAL insertion point; and if it did
4348 : * happen, attempting to flush WAL through that location would fail, with
4349 : * disastrous system-wide consequences. To make sure that can't happen,
4350 : * skip the flush if the buffer isn't permanent.
4351 : */
4352 1102566 : if (buf_state & BM_PERMANENT)
4353 1099004 : XLogFlush(recptr);
4354 :
4355 : /*
4356 : * Now it's safe to write the buffer to disk. Note that no one else should
4357             : * have been able to write it while we were busy with log flushing,
4358 : * because we got the exclusive right to perform I/O by setting the
4359 : * BM_IO_IN_PROGRESS bit.
4360 : */
4361 1102566 : bufBlock = BufHdrGetBlock(buf);
4362 :
4363 : /*
4364             : * Update the page checksum if desired. Since we have only a shared lock on the
4365 : * buffer, other processes might be updating hint bits in it, so we must
4366 : * copy the page to private storage if we do checksumming.
4367 : */
4368 1102566 : bufToWrite = PageSetChecksumCopy((Page) bufBlock, buf->tag.blockNum);
4369 :
4370 1102566 : io_start = pgstat_prepare_io_time(track_io_timing);
4371 :
4372 : /*
4373 : * bufToWrite is either the shared buffer or a copy, as appropriate.
4374 : */
4375 1102566 : smgrwrite(reln,
4376 1102566 : BufTagGetForkNum(&buf->tag),
4377 : buf->tag.blockNum,
4378 : bufToWrite,
4379 : false);
4380 :
4381 : /*
4382 : * When a strategy is in use, only flushes of dirty buffers already in the
4383 : * strategy ring are counted as strategy writes (IOCONTEXT
4384 : * [BULKREAD|BULKWRITE|VACUUM] IOOP_WRITE) for the purpose of IO
4385 : * statistics tracking.
4386 : *
4387 : * If a shared buffer initially added to the ring must be flushed before
4388 : * being used, this is counted as an IOCONTEXT_NORMAL IOOP_WRITE.
4389 : *
4390 : * If a shared buffer which was added to the ring later because the
4391 : * current strategy buffer is pinned or in use or because all strategy
4392 : * buffers were dirty and rejected (for BAS_BULKREAD operations only)
4393 : * requires flushing, this is counted as an IOCONTEXT_NORMAL IOOP_WRITE
4394 : * (from_ring will be false).
4395 : *
4396 : * When a strategy is not in use, the write can only be a "regular" write
4397 : * of a dirty shared buffer (IOCONTEXT_NORMAL IOOP_WRITE).
4398 : */
4399 1102566 : pgstat_count_io_op_time(IOOBJECT_RELATION, io_context,
4400 : IOOP_WRITE, io_start, 1, BLCKSZ);
4401 :
4402 1102566 : pgBufferUsage.shared_blks_written++;
4403 :
4404 : /*
4405 : * Mark the buffer as clean (unless BM_JUST_DIRTIED has become set) and
4406 : * end the BM_IO_IN_PROGRESS state.
4407 : */
4408 1102566 : TerminateBufferIO(buf, true, 0, true, false);
4409 :
4410 : TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(BufTagGetForkNum(&buf->tag),
4411 : buf->tag.blockNum,
4412 : reln->smgr_rlocator.locator.spcOid,
4413 : reln->smgr_rlocator.locator.dbOid,
4414 : reln->smgr_rlocator.locator.relNumber);
4415 :
4416 : /* Pop the error context stack */
4417 1102566 : error_context_stack = errcallback.previous;
4418 : }
4419 :
4420 : /*
4421 : * RelationGetNumberOfBlocksInFork
4422 : * Determines the current number of pages in the specified relation fork.
4423 : *
4424 : * Note that the accuracy of the result will depend on the details of the
4425 : * relation's storage. For builtin AMs it'll be accurate, but for external AMs
4426 : * it might not be.
4427 : */
4428 : BlockNumber
4429 3975490 : RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
4430 : {
4431 3975490 : if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
4432 : {
4433 : /*
4434 : * Not every table AM uses BLCKSZ wide fixed size blocks. Therefore
4435 : * tableam returns the size in bytes - but for the purpose of this
4436 : * routine, we want the number of blocks. Therefore divide, rounding
4437 : * up.
4438 : */
4439 : uint64 szbytes;
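                 : /*
                 :  * For illustration: the rounding below maps, e.g., 8193 bytes to 2 blocks
                 :  * with the default BLCKSZ of 8192, and 0 bytes to 0 blocks.
                 :  */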
4440 :
4441 3072572 : szbytes = table_relation_size(relation, forkNum);
4442 :
4443 3072534 : return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
4444 : }
4445 902918 : else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
4446 : {
4447 902918 : return smgrnblocks(RelationGetSmgr(relation), forkNum);
4448 : }
4449 : else
4450 : Assert(false);
4451 :
4452 0 : return 0; /* keep compiler quiet */
4453 : }
4454 :
4455 : /*
4456 : * BufferIsPermanent
4457 : * Determines whether a buffer will potentially still be around after
4458 : * a crash. Caller must hold a buffer pin.
4459 : */
4460 : bool
4461 19958886 : BufferIsPermanent(Buffer buffer)
4462 : {
4463 : BufferDesc *bufHdr;
4464 :
4465 : /* Local buffers are used only for temp relations. */
4466 19958886 : if (BufferIsLocal(buffer))
4467 1248038 : return false;
4468 :
4469 : /* Make sure we've got a real buffer, and that we hold a pin on it. */
4470 : Assert(BufferIsValid(buffer));
4471 : Assert(BufferIsPinned(buffer));
4472 :
4473 : /*
4474 : * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
4475 : * need not bother with the buffer header spinlock. Even if someone else
4476 : * changes the buffer header state while we're doing this, the state is
4477 : * changed atomically, so we'll read the old value or the new value, but
4478 : * not random garbage.
4479 : */
4480 18710848 : bufHdr = GetBufferDescriptor(buffer - 1);
4481 18710848 : return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
4482 : }
4483 :
4484 : /*
4485 : * BufferGetLSNAtomic
4486 : * Retrieves the LSN of the buffer atomically using a buffer header lock.
4487 : * This is necessary for some callers who may not have an exclusive lock
4488 : * on the buffer.
4489 : */
4490 : XLogRecPtr
4491 13579078 : BufferGetLSNAtomic(Buffer buffer)
4492 : {
4493 13579078 : char *page = BufferGetPage(buffer);
4494 : BufferDesc *bufHdr;
4495 : XLogRecPtr lsn;
4496 : uint32 buf_state;
4497 :
4498 : /*
4499 : * If we don't need locking for correctness, fastpath out.
4500 : */
4501 13579078 : if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
4502 476868 : return PageGetLSN(page);
4503 :
4504 : /* Make sure we've got a real buffer, and that we hold a pin on it. */
4505 : Assert(BufferIsValid(buffer));
4506 : Assert(BufferIsPinned(buffer));
4507 :
4508 13102210 : bufHdr = GetBufferDescriptor(buffer - 1);
4509 13102210 : buf_state = LockBufHdr(bufHdr);
4510 13102210 : lsn = PageGetLSN(page);
4511 13102210 : UnlockBufHdr(bufHdr, buf_state);
4512 :
4513 13102210 : return lsn;
4514 : }
4515 :
4516 : /* ---------------------------------------------------------------------
4517 : * DropRelationBuffers
4518 : *
4519 : * This function removes from the buffer pool all the pages of the
4520 : * specified relation forks that have block numbers >= firstDelBlock.
4521 : * (In particular, with firstDelBlock = 0, all pages are removed.)
4522 : * Dirty pages are simply dropped, without bothering to write them
4523 : * out first. Therefore, this is NOT rollback-able, and so should be
4524 : * used only with extreme caution!
4525 : *
4526 : * Currently, this is called only from smgr.c when the underlying file
4527 : * is about to be deleted or truncated (firstDelBlock is needed for
4528 : * the truncation case). The data in the affected pages would therefore
4529 : * be deleted momentarily anyway, and there is no point in writing it.
4530 : * It is the responsibility of higher-level code to ensure that the
4531 : * deletion or truncation does not lose any data that could be needed
4532 : * later. It is also the responsibility of higher-level code to ensure
4533 : * that no other process could be trying to load more pages of the
4534 : * relation into buffers.
4535 : * --------------------------------------------------------------------
4536 : */
4537 : void
4538 1298 : DropRelationBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
4539 : int nforks, BlockNumber *firstDelBlock)
4540 : {
4541 : int i;
4542 : int j;
4543 : RelFileLocatorBackend rlocator;
4544 : BlockNumber nForkBlock[MAX_FORKNUM];
4545 1298 : uint64 nBlocksToInvalidate = 0;
4546 :
4547 1298 : rlocator = smgr_reln->smgr_rlocator;
4548 :
4549 : /* If it's a local relation, it's localbuf.c's problem. */
4550 1298 : if (RelFileLocatorBackendIsTemp(rlocator))
4551 : {
4552 748 : if (rlocator.backend == MyProcNumber)
4553 748 : DropRelationLocalBuffers(rlocator.locator, forkNum, nforks,
4554 : firstDelBlock);
4555 :
4556 836 : return;
4557 : }
4558 :
4559 : /*
4560 : * To remove all the pages of the specified relation forks from the buffer
4561             : * pool, we would need to scan the entire buffer pool, but we can optimize this
4562             : * by looking the buffers up in the BufMapping table, provided we know the exact
4563             : * size of each fork of the relation. The exact size is required to ensure
4564             : * that we don't leave behind any buffer for the relation being dropped, as
4565             : * otherwise the background writer or checkpointer could hit a PANIC
4566             : * while flushing buffers corresponding to files that no longer exist.
4567 : *
4568 : * To know the exact size, we rely on the size cached for each fork by us
4569             : * during recovery, which limits the optimization to recovery and to
4570             : * standbys, but we could easily extend it once we have a shared cache for
4571             : * relation sizes.
4572 : *
4573 : * In recovery, we cache the value returned by the first lseek(SEEK_END)
4574 : * and the future writes keeps the cached value up-to-date. See
4575             : * and future writes keep the cached value up-to-date. See
4576 : * than the actual number of existing blocks in the file due to buggy
4577 : * Linux kernels that might not have accounted for the recent write. But
4578 : * that should be fine because there must not be any buffers after that
4579 : * file size.
4580 : */
4581 762 : for (i = 0; i < nforks; i++)
4582 : {
4583 : /* Get the number of blocks for a relation's fork */
4584 650 : nForkBlock[i] = smgrnblocks_cached(smgr_reln, forkNum[i]);
4585 :
4586 650 : if (nForkBlock[i] == InvalidBlockNumber)
4587 : {
4588 438 : nBlocksToInvalidate = InvalidBlockNumber;
4589 438 : break;
4590 : }
4591 :
4592 : /* calculate the number of blocks to be invalidated */
4593 212 : nBlocksToInvalidate += (nForkBlock[i] - firstDelBlock[i]);
4594 : }
4595 :
4596 : /*
4597 : * We apply the optimization iff the total number of blocks to invalidate
4598 : * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
4599 : */
4600 550 : if (BlockNumberIsValid(nBlocksToInvalidate) &&
4601 112 : nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
4602 : {
4603 248 : for (j = 0; j < nforks; j++)
4604 160 : FindAndDropRelationBuffers(rlocator.locator, forkNum[j],
4605 160 : nForkBlock[j], firstDelBlock[j]);
4606 88 : return;
4607 : }
4608 :
4609 5976782 : for (i = 0; i < NBuffers; i++)
4610 : {
4611 5976320 : BufferDesc *bufHdr = GetBufferDescriptor(i);
4612 : uint32 buf_state;
4613 :
4614 : /*
4615 : * We can make this a tad faster by prechecking the buffer tag before
4616 : * we attempt to lock the buffer; this saves a lot of lock
4617 : * acquisitions in typical cases. It should be safe because the
4618 : * caller must have AccessExclusiveLock on the relation, or some other
4619 : * reason to be certain that no one is loading new pages of the rel
4620 : * into the buffer pool. (Otherwise we might well miss such pages
4621 : * entirely.) Therefore, while the tag might be changing while we
4622 : * look at it, it can't be changing *to* a value we care about, only
4623 : * *away* from such a value. So false negatives are impossible, and
4624 : * false positives are safe because we'll recheck after getting the
4625 : * buffer lock.
4626 : *
4627 : * We could check forkNum and blockNum as well as the rlocator, but
4628 : * the incremental win from doing so seems small.
4629 : */
4630 5976320 : if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator))
4631 5960124 : continue;
4632 :
4633 16196 : buf_state = LockBufHdr(bufHdr);
4634 :
4635 41018 : for (j = 0; j < nforks; j++)
4636 : {
4637 28868 : if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator) &&
4638 28868 : BufTagGetForkNum(&bufHdr->tag) == forkNum[j] &&
4639 15978 : bufHdr->tag.blockNum >= firstDelBlock[j])
4640 : {
4641 4046 : InvalidateBuffer(bufHdr); /* releases spinlock */
4642 4046 : break;
4643 : }
4644 : }
4645 16196 : if (j >= nforks)
4646 12150 : UnlockBufHdr(bufHdr, buf_state);
4647 : }
4648 : }
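/*
 * Illustrative sketch (not part of the original file): how a truncation-style
 * caller could invoke DropRelationBuffers() above, assuming it already holds
 * AccessExclusiveLock on the relation and has chosen the new length of the
 * main fork.  The helper name and the choice of forks are hypothetical; the
 * real caller lives in smgr.c.
 */
#ifdef BUFMGR_EXAMPLES
static void
example_drop_truncated_buffers(SMgrRelation reln, BlockNumber new_main_blocks)
{
	ForkNumber	forks[2] = {MAIN_FORKNUM, FSM_FORKNUM};
	BlockNumber firstDel[2];

	firstDel[0] = new_main_blocks;	/* keep main-fork blocks < new length */
	firstDel[1] = 0;				/* drop the FSM fork's buffers entirely */

	/* Dirty pages are simply discarded, not written out. */
	DropRelationBuffers(reln, forks, 2, firstDel);
}
#endif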
4649 :
4650 : /* ---------------------------------------------------------------------
4651 : * DropRelationsAllBuffers
4652 : *
4653 : * This function removes from the buffer pool all the pages of all
4654 : * forks of the specified relations. It's equivalent to calling
4655 : * DropRelationBuffers once per fork per relation with firstDelBlock = 0.
4656 : * --------------------------------------------------------------------
4657 : */
4658 : void
4659 27952 : DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
4660 : {
4661 : int i;
4662 27952 : int n = 0;
4663 : SMgrRelation *rels;
4664 : BlockNumber (*block)[MAX_FORKNUM + 1];
4665 27952 : uint64 nBlocksToInvalidate = 0;
4666 : RelFileLocator *locators;
4667 27952 : bool cached = true;
4668 : bool use_bsearch;
4669 :
4670 27952 : if (nlocators == 0)
4671 0 : return;
4672 :
4673 27952 : rels = palloc(sizeof(SMgrRelation) * nlocators); /* non-local relations */
4674 :
4675 : /* If it's a local relation, it's localbuf.c's problem. */
4676 120544 : for (i = 0; i < nlocators; i++)
4677 : {
4678 92592 : if (RelFileLocatorBackendIsTemp(smgr_reln[i]->smgr_rlocator))
4679 : {
4680 6224 : if (smgr_reln[i]->smgr_rlocator.backend == MyProcNumber)
4681 6224 : DropRelationAllLocalBuffers(smgr_reln[i]->smgr_rlocator.locator);
4682 : }
4683 : else
4684 86368 : rels[n++] = smgr_reln[i];
4685 : }
4686 :
4687 : /*
4688 : * If there are no non-local relations, then we're done. Release the
4689 : * memory and return.
4690 : */
4691 27952 : if (n == 0)
4692 : {
4693 1596 : pfree(rels);
4694 1596 : return;
4695 : }
4696 :
4697 : /*
4698 : * This is used to remember the number of blocks for each fork of all the
4699 : * relations.
4700 : */
4701 : block = (BlockNumber (*)[MAX_FORKNUM + 1])
4702 26356 : palloc(sizeof(BlockNumber) * n * (MAX_FORKNUM + 1));
4703 :
4704 : /*
4705 : * We can avoid scanning the entire buffer pool if we know the exact size
4706 : * of each of the given relation forks. See DropRelationBuffers.
4707 : */
4708 55378 : for (i = 0; i < n && cached; i++)
4709 : {
4710 46368 : for (int j = 0; j <= MAX_FORKNUM; j++)
4711 : {
4712 : /* Get the number of blocks for a relation's fork. */
4713 42054 : block[i][j] = smgrnblocks_cached(rels[i], j);
4714 :
4715 : /* We only need to consider the relation forks that exist. */
4716 42054 : if (block[i][j] == InvalidBlockNumber)
4717 : {
4718 37352 : if (!smgrexists(rels[i], j))
4719 12644 : continue;
4720 24708 : cached = false;
4721 24708 : break;
4722 : }
4723 :
4724 : /* calculate the total number of blocks to be invalidated */
4725 4702 : nBlocksToInvalidate += block[i][j];
4726 : }
4727 : }
4728 :
4729 : /*
4730 : * We apply the optimization iff the total number of blocks to invalidate
4731 : * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
4732 : */
4733 26356 : if (cached && nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
4734 : {
4735 2714 : for (i = 0; i < n; i++)
4736 : {
4737 7470 : for (int j = 0; j <= MAX_FORKNUM; j++)
4738 : {
4739 : /* ignore relation forks that don't exist */
4740 5976 : if (!BlockNumberIsValid(block[i][j]))
4741 4464 : continue;
4742 :
4743 : /* drop all the buffers for a particular relation fork */
4744 1512 : FindAndDropRelationBuffers(rels[i]->smgr_rlocator.locator,
4745 1512 : j, block[i][j], 0);
4746 : }
4747 : }
4748 :
4749 1220 : pfree(block);
4750 1220 : pfree(rels);
4751 1220 : return;
4752 : }
4753 :
4754 25136 : pfree(block);
4755 25136 : locators = palloc(sizeof(RelFileLocator) * n); /* non-local relations */
4756 110010 : for (i = 0; i < n; i++)
4757 84874 : locators[i] = rels[i]->smgr_rlocator.locator;
4758 :
4759 : /*
4760 : * For a small number of relations to drop, just use a simple walk-through
4761 : * to save the bsearch overhead. The threshold is more of a guess than an
4762 : * exactly determined value, as it depends on many factors (CPU and RAM
4763 : * speeds, amount of shared buffers, etc.).
4764 : */
4765 25136 : use_bsearch = n > RELS_BSEARCH_THRESHOLD;
4766 :
4767 : /* sort the list of rlocators if necessary */
4768 25136 : if (use_bsearch)
4769 334 : qsort(locators, n, sizeof(RelFileLocator), rlocator_comparator);
4770 :
4771 280179760 : for (i = 0; i < NBuffers; i++)
4772 : {
4773 280154624 : RelFileLocator *rlocator = NULL;
4774 280154624 : BufferDesc *bufHdr = GetBufferDescriptor(i);
4775 : uint32 buf_state;
4776 :
4777 : /*
4778 : * As in DropRelationBuffers, an unlocked precheck should be safe and
4779 : * saves some cycles.
4780 : */
4781 :
4782 280154624 : if (!use_bsearch)
4783 : {
4784 : int j;
4785 :
4786 1093316728 : for (j = 0; j < n; j++)
4787 : {
4788 816860170 : if (BufTagMatchesRelFileLocator(&bufHdr->tag, &locators[j]))
4789 : {
4790 176530 : rlocator = &locators[j];
4791 176530 : break;
4792 : }
4793 : }
4794 : }
4795 : else
4796 : {
4797 : RelFileLocator locator;
4798 :
4799 3521536 : locator = BufTagGetRelFileLocator(&bufHdr->tag);
4800 3521536 : rlocator = bsearch(&locator,
4801 : locators, n, sizeof(RelFileLocator),
4802 : rlocator_comparator);
4803 : }
4804 :
4805 : /* buffer doesn't belong to any of the given relfilelocators; skip it */
4806 280154624 : if (rlocator == NULL)
4807 279974656 : continue;
4808 :
4809 179968 : buf_state = LockBufHdr(bufHdr);
4810 179968 : if (BufTagMatchesRelFileLocator(&bufHdr->tag, rlocator))
4811 179968 : InvalidateBuffer(bufHdr); /* releases spinlock */
4812 : else
4813 0 : UnlockBufHdr(bufHdr, buf_state);
4814 : }
4815 :
4816 25136 : pfree(locators);
4817 25136 : pfree(rels);
4818 : }
4819 :
4820 : /* ---------------------------------------------------------------------
4821 : * FindAndDropRelationBuffers
4822 : *
4823 : * This function performs lookups in the BufMapping table and removes from
4824 : * the buffer pool all the pages of the specified relation fork that have
4825 : * block numbers >= firstDelBlock. (In particular, with firstDelBlock = 0,
4826 : * all pages are removed.)
4827 : * --------------------------------------------------------------------
4828 : */
4829 : static void
4830 1672 : FindAndDropRelationBuffers(RelFileLocator rlocator, ForkNumber forkNum,
4831 : BlockNumber nForkBlock,
4832 : BlockNumber firstDelBlock)
4833 : {
4834 : BlockNumber curBlock;
4835 :
4836 4024 : for (curBlock = firstDelBlock; curBlock < nForkBlock; curBlock++)
4837 : {
4838 : uint32 bufHash; /* hash value for tag */
4839 : BufferTag bufTag; /* identity of requested block */
4840 : LWLock *bufPartitionLock; /* buffer partition lock for it */
4841 : int buf_id;
4842 : BufferDesc *bufHdr;
4843 : uint32 buf_state;
4844 :
4845 : /* create a tag so we can lookup the buffer */
4846 2352 : InitBufferTag(&bufTag, &rlocator, forkNum, curBlock);
4847 :
4848 : /* determine its hash code and partition lock ID */
4849 2352 : bufHash = BufTableHashCode(&bufTag);
4850 2352 : bufPartitionLock = BufMappingPartitionLock(bufHash);
4851 :
4852 : /* Check that it is in the buffer pool. If not, do nothing. */
4853 2352 : LWLockAcquire(bufPartitionLock, LW_SHARED);
4854 2352 : buf_id = BufTableLookup(&bufTag, bufHash);
4855 2352 : LWLockRelease(bufPartitionLock);
4856 :
4857 2352 : if (buf_id < 0)
4858 280 : continue;
4859 :
4860 2072 : bufHdr = GetBufferDescriptor(buf_id);
4861 :
4862 : /*
4863 : * We need to lock the buffer header and recheck if the buffer is
4864 : * still associated with the same block because the buffer could be
4865 : * evicted by some other backend loading blocks for a different
4866 : * relation after we release the lock on the BufMapping table.
4867 : */
4868 2072 : buf_state = LockBufHdr(bufHdr);
4869 :
4870 4144 : if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
4871 2072 : BufTagGetForkNum(&bufHdr->tag) == forkNum &&
4872 2072 : bufHdr->tag.blockNum >= firstDelBlock)
4873 2072 : InvalidateBuffer(bufHdr); /* releases spinlock */
4874 : else
4875 0 : UnlockBufHdr(bufHdr, buf_state);
4876 : }
4877 1672 : }
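/*
 * Illustrative sketch (not part of the original file): the BufMapping lookup
 * pattern used by FindAndDropRelationBuffers(), reduced to a residency test.
 * The helper name is hypothetical, and the answer can become stale as soon as
 * the partition lock is released.
 */
#ifdef BUFMGR_EXAMPLES
static bool
example_block_is_cached(RelFileLocator rlocator, ForkNumber forkNum,
						BlockNumber blockNum)
{
	BufferTag	tag;
	uint32		hash;
	LWLock	   *partitionLock;
	int			buf_id;

	/* build the lookup tag, then hash it to find the mapping partition */
	InitBufferTag(&tag, &rlocator, forkNum, blockNum);
	hash = BufTableHashCode(&tag);
	partitionLock = BufMappingPartitionLock(hash);

	LWLockAcquire(partitionLock, LW_SHARED);
	buf_id = BufTableLookup(&tag, hash);
	LWLockRelease(partitionLock);

	return buf_id >= 0;
}
#endif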
4878 :
4879 : /* ---------------------------------------------------------------------
4880 : * DropDatabaseBuffers
4881 : *
4882 : * This function removes all the buffers in the buffer cache for a
4883 : * particular database. Dirty pages are simply dropped, without
4884 : * bothering to write them out first. This is used when we destroy a
4885 : * database, to avoid trying to flush data to disk when the directory
4886 : * tree no longer exists. Implementation is pretty similar to
4887 : * DropRelationBuffers() which is for destroying just one relation.
4888 : * --------------------------------------------------------------------
4889 : */
4890 : void
4891 134 : DropDatabaseBuffers(Oid dbid)
4892 : {
4893 : int i;
4894 :
4895 : /*
4896 : * We needn't consider local buffers, since by assumption the target
4897 : * database isn't our own.
4898 : */
4899 :
4900 960134 : for (i = 0; i < NBuffers; i++)
4901 : {
4902 960000 : BufferDesc *bufHdr = GetBufferDescriptor(i);
4903 : uint32 buf_state;
4904 :
4905 : /*
4906 : * As in DropRelationBuffers, an unlocked precheck should be safe and
4907 : * saves some cycles.
4908 : */
4909 960000 : if (bufHdr->tag.dbOid != dbid)
4910 934580 : continue;
4911 :
4912 25420 : buf_state = LockBufHdr(bufHdr);
4913 25420 : if (bufHdr->tag.dbOid == dbid)
4914 25420 : InvalidateBuffer(bufHdr); /* releases spinlock */
4915 : else
4916 0 : UnlockBufHdr(bufHdr, buf_state);
4917 : }
4918 134 : }
4919 :
4920 : /* ---------------------------------------------------------------------
4921 : * FlushRelationBuffers
4922 : *
4923 : * This function writes all dirty pages of a relation out to disk
4924 : * (or more accurately, out to kernel disk buffers), ensuring that the
4925 : * kernel has an up-to-date view of the relation.
4926 : *
4927 : * Generally, the caller should be holding AccessExclusiveLock on the
4928 : * target relation to ensure that no other backend is busy dirtying
4929 : * more blocks of the relation; the effects can't be expected to last
4930 : * after the lock is released.
4931 : *
4932 : * XXX currently it sequentially searches the buffer pool; this should be
4933 : * changed to a smarter search strategy. This routine is not used in any
4934 : * performance-critical code paths, however, so it's not worth adding
4935 : * overhead to the normal paths just to make it go faster.
4936 : * --------------------------------------------------------------------
4937 : */
4938 : void
4939 286 : FlushRelationBuffers(Relation rel)
4940 : {
4941 : int i;
4942 : BufferDesc *bufHdr;
4943 286 : SMgrRelation srel = RelationGetSmgr(rel);
4944 :
4945 286 : if (RelationUsesLocalBuffers(rel))
4946 : {
4947 1818 : for (i = 0; i < NLocBuffer; i++)
4948 : {
4949 : uint32 buf_state;
4950 :
4951 1800 : bufHdr = GetLocalBufferDescriptor(i);
4952 1800 : if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
4953 600 : ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
4954 : (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4955 : {
4956 : ErrorContextCallback errcallback;
4957 :
4958 : /* Setup error traceback support for ereport() */
4959 600 : errcallback.callback = local_buffer_write_error_callback;
4960 600 : errcallback.arg = bufHdr;
4961 600 : errcallback.previous = error_context_stack;
4962 600 : error_context_stack = &errcallback;
4963 :
4964 : /* Make sure we can handle the pin */
4965 600 : ReservePrivateRefCountEntry();
4966 600 : ResourceOwnerEnlarge(CurrentResourceOwner);
4967 :
4968 : /*
4969 : * Pin/unpin mostly to make valgrind work, but it also seems
4970 : * like the right thing to do.
4971 : */
4972 600 : PinLocalBuffer(bufHdr, false);
4973 :
4974 :
4975 600 : FlushLocalBuffer(bufHdr, srel);
4976 :
4977 600 : UnpinLocalBuffer(BufferDescriptorGetBuffer(bufHdr));
4978 :
4979 : /* Pop the error context stack */
4980 600 : error_context_stack = errcallback.previous;
4981 : }
4982 : }
4983 :
4984 18 : return;
4985 : }
4986 :
4987 3188236 : for (i = 0; i < NBuffers; i++)
4988 : {
4989 : uint32 buf_state;
4990 :
4991 3187968 : bufHdr = GetBufferDescriptor(i);
4992 :
4993 : /*
4994 : * As in DropRelationBuffers, an unlocked precheck should be safe and
4995 : * saves some cycles.
4996 : */
4997 3187968 : if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
4998 3187502 : continue;
4999 :
5000 : /* Make sure we can handle the pin */
5001 466 : ReservePrivateRefCountEntry();
5002 466 : ResourceOwnerEnlarge(CurrentResourceOwner);
5003 :
5004 466 : buf_state = LockBufHdr(bufHdr);
5005 466 : if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
5006 466 : (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
5007 : {
5008 380 : PinBuffer_Locked(bufHdr);
5009 380 : LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
5010 380 : FlushBuffer(bufHdr, srel, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
5011 380 : LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
5012 380 : UnpinBuffer(bufHdr);
5013 : }
5014 : else
5015 86 : UnlockBufHdr(bufHdr, buf_state);
5016 : }
5017 : }
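/*
 * Illustrative sketch (not part of the original file): the calling convention
 * FlushRelationBuffers() expects.  The helper name is hypothetical, and the
 * CheckRelationLockedByMe() assertion is merely one way to document the
 * assumption that the caller already holds AccessExclusiveLock.
 */
#ifdef BUFMGR_EXAMPLES
static void
example_flush_relation(Relation rel)
{
	/*
	 * Without AccessExclusiveLock other backends can keep dirtying blocks,
	 * so the "all dirty pages written" guarantee would not outlive the call.
	 */
	Assert(CheckRelationLockedByMe(rel, AccessExclusiveLock, false));

	FlushRelationBuffers(rel);
}
#endif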
5018 :
5019 : /* ---------------------------------------------------------------------
5020 : * FlushRelationsAllBuffers
5021 : *
5022 : * This function flushes out of the buffer pool all the pages of all
5023 : * forks of the specified smgr relations. It's equivalent to calling
5024 : * FlushRelationBuffers once per relation. The relations are assumed not
5025 : * to use local buffers.
5026 : * --------------------------------------------------------------------
5027 : */
5028 : void
5029 30 : FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
5030 : {
5031 : int i;
5032 : SMgrSortArray *srels;
5033 : bool use_bsearch;
5034 :
5035 30 : if (nrels == 0)
5036 0 : return;
5037 :
5038 : /* fill-in array for qsort */
5039 30 : srels = palloc(sizeof(SMgrSortArray) * nrels);
5040 :
5041 60 : for (i = 0; i < nrels; i++)
5042 : {
5043 : Assert(!RelFileLocatorBackendIsTemp(smgrs[i]->smgr_rlocator));
5044 :
5045 30 : srels[i].rlocator = smgrs[i]->smgr_rlocator.locator;
5046 30 : srels[i].srel = smgrs[i];
5047 : }
5048 :
5049 : /*
5050 : * Save the bsearch overhead for a small number of relations to sync. See
5051 : * DropRelationsAllBuffers for details.
5052 : */
5053 30 : use_bsearch = nrels > RELS_BSEARCH_THRESHOLD;
5054 :
5055 : /* sort the list of SMgrRelations if necessary */
5056 30 : if (use_bsearch)
5057 0 : qsort(srels, nrels, sizeof(SMgrSortArray), rlocator_comparator);
5058 :
5059 491550 : for (i = 0; i < NBuffers; i++)
5060 : {
5061 491520 : SMgrSortArray *srelent = NULL;
5062 491520 : BufferDesc *bufHdr = GetBufferDescriptor(i);
5063 : uint32 buf_state;
5064 :
5065 : /*
5066 : * As in DropRelationBuffers, an unlocked precheck should be safe and
5067 : * saves some cycles.
5068 : */
5069 :
5070 491520 : if (!use_bsearch)
5071 : {
5072 : int j;
5073 :
5074 971868 : for (j = 0; j < nrels; j++)
5075 : {
5076 491520 : if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srels[j].rlocator))
5077 : {
5078 11172 : srelent = &srels[j];
5079 11172 : break;
5080 : }
5081 : }
5082 : }
5083 : else
5084 : {
5085 : RelFileLocator rlocator;
5086 :
5087 0 : rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
5088 0 : srelent = bsearch(&rlocator,
5089 : srels, nrels, sizeof(SMgrSortArray),
5090 : rlocator_comparator);
5091 : }
5092 :
5093 : /* buffer doesn't belong to any of the given relfilelocators; skip it */
5094 491520 : if (srelent == NULL)
5095 480348 : continue;
5096 :
5097 : /* Make sure we can handle the pin */
5098 11172 : ReservePrivateRefCountEntry();
5099 11172 : ResourceOwnerEnlarge(CurrentResourceOwner);
5100 :
5101 11172 : buf_state = LockBufHdr(bufHdr);
5102 11172 : if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srelent->rlocator) &&
5103 11172 : (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
5104 : {
5105 9992 : PinBuffer_Locked(bufHdr);
5106 9992 : LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
5107 9992 : FlushBuffer(bufHdr, srelent->srel, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
5108 9992 : LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
5109 9992 : UnpinBuffer(bufHdr);
5110 : }
5111 : else
5112 1180 : UnlockBufHdr(bufHdr, buf_state);
5113 : }
5114 :
5115 30 : pfree(srels);
5116 : }
5117 :
5118 : /* ---------------------------------------------------------------------
5119 : * RelationCopyStorageUsingBuffer
5120 : *
5121 : * Copy a fork's data using the buffer manager. Same as RelationCopyStorage,
5122 : * but instead of using smgrread and smgrextend this copies via bufmgr APIs.
5123 : *
5124 : * Refer to the comments atop CreateAndCopyRelationData() for details about
5125 : * the 'permanent' parameter.
5126 : * --------------------------------------------------------------------
5127 : */
5128 : static void
5129 146068 : RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
5130 : RelFileLocator dstlocator,
5131 : ForkNumber forkNum, bool permanent)
5132 : {
5133 : Buffer srcBuf;
5134 : Buffer dstBuf;
5135 : Page srcPage;
5136 : Page dstPage;
5137 : bool use_wal;
5138 : BlockNumber nblocks;
5139 : BlockNumber blkno;
5140 : PGIOAlignedBlock buf;
5141 : BufferAccessStrategy bstrategy_src;
5142 : BufferAccessStrategy bstrategy_dst;
5143 : BlockRangeReadStreamPrivate p;
5144 : ReadStream *src_stream;
5145 : SMgrRelation src_smgr;
5146 :
5147 : /*
5148 : * In general, we want to write WAL whenever wal_level > 'minimal', but we
5149 : * can skip it when copying any fork of an unlogged relation other than
5150 : * the init fork.
5151 : */
5152 146068 : use_wal = XLogIsNeeded() && (permanent || forkNum == INIT_FORKNUM);
5153 :
5154 : /* Get number of blocks in the source relation. */
5155 146068 : nblocks = smgrnblocks(smgropen(srclocator, INVALID_PROC_NUMBER),
5156 : forkNum);
5157 :
5158 : /* Nothing to copy; just return. */
5159 146068 : if (nblocks == 0)
5160 25478 : return;
5161 :
5162 : /*
5163 : * Bulk extend the destination relation to the same size as the source
5164 : * relation before starting to copy block by block.
5165 : */
5166 120590 : memset(buf.data, 0, BLCKSZ);
5167 120590 : smgrextend(smgropen(dstlocator, INVALID_PROC_NUMBER), forkNum, nblocks - 1,
5168 : buf.data, true);
5169 :
5170 : /* This is a bulk operation, so use buffer access strategies. */
5171 120590 : bstrategy_src = GetAccessStrategy(BAS_BULKREAD);
5172 120590 : bstrategy_dst = GetAccessStrategy(BAS_BULKWRITE);
5173 :
5174 : /* Initialize streaming read */
5175 120590 : p.current_blocknum = 0;
5176 120590 : p.last_exclusive = nblocks;
5177 120590 : src_smgr = smgropen(srclocator, INVALID_PROC_NUMBER);
5178 :
5179 : /*
5180 : * It is safe to use batchmode as block_range_read_stream_cb takes no
5181 : * locks.
5182 : */
5183 120590 : src_stream = read_stream_begin_smgr_relation(READ_STREAM_FULL |
5184 : READ_STREAM_USE_BATCHING,
5185 : bstrategy_src,
5186 : src_smgr,
5187 : permanent ? RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED,
5188 : forkNum,
5189 : block_range_read_stream_cb,
5190 : &p,
5191 : 0);
5192 :
5193 : /* Iterate over each block of the source relation file. */
5194 580442 : for (blkno = 0; blkno < nblocks; blkno++)
5195 : {
5196 459856 : CHECK_FOR_INTERRUPTS();
5197 :
5198 : /* Read block from source relation. */
5199 459856 : srcBuf = read_stream_next_buffer(src_stream, NULL);
5200 459852 : LockBuffer(srcBuf, BUFFER_LOCK_SHARE);
5201 459852 : srcPage = BufferGetPage(srcBuf);
5202 :
5203 459852 : dstBuf = ReadBufferWithoutRelcache(dstlocator, forkNum,
5204 : BufferGetBlockNumber(srcBuf),
5205 : RBM_ZERO_AND_LOCK, bstrategy_dst,
5206 : permanent);
5207 459852 : dstPage = BufferGetPage(dstBuf);
5208 :
5209 459852 : START_CRIT_SECTION();
5210 :
5211 : /* Copy page data from the source to the destination. */
5212 459852 : memcpy(dstPage, srcPage, BLCKSZ);
5213 459852 : MarkBufferDirty(dstBuf);
5214 :
5215 : /* WAL-log the copied page. */
5216 459852 : if (use_wal)
5217 247816 : log_newpage_buffer(dstBuf, true);
5218 :
5219 459852 : END_CRIT_SECTION();
5220 :
5221 459852 : UnlockReleaseBuffer(dstBuf);
5222 459852 : UnlockReleaseBuffer(srcBuf);
5223 : }
5224 : Assert(read_stream_next_buffer(src_stream, NULL) == InvalidBuffer);
5225 120586 : read_stream_end(src_stream);
5226 :
5227 120586 : FreeAccessStrategy(bstrategy_src);
5228 120586 : FreeAccessStrategy(bstrategy_dst);
5229 : }
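/*
 * Illustrative sketch (not part of the original file): the read-stream
 * pattern used above, applied to an ordinary Relation.  Stream a contiguous
 * block range, pin each buffer in turn, and release it.  The helper name and
 * the elided per-page work are hypothetical.
 */
#ifdef BUFMGR_EXAMPLES
static void
example_stream_block_range(Relation rel, BlockNumber first, BlockNumber end)
{
	BlockRangeReadStreamPrivate p;
	ReadStream *stream;
	Buffer		buf;

	p.current_blocknum = first;
	p.last_exclusive = end;

	stream = read_stream_begin_relation(READ_STREAM_FULL,
										NULL,	/* default strategy */
										rel,
										MAIN_FORKNUM,
										block_range_read_stream_cb,
										&p,
										0);

	while ((buf = read_stream_next_buffer(stream, NULL)) != InvalidBuffer)
	{
		LockBuffer(buf, BUFFER_LOCK_SHARE);
		/* ... inspect BufferGetPage(buf) here ... */
		UnlockReleaseBuffer(buf);
	}

	read_stream_end(stream);
}
#endif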
5230 :
5231 : /* ---------------------------------------------------------------------
5232 : * CreateAndCopyRelationData
5233 : *
5234 : * Create destination relation storage and copy all forks from the
5235 : * source relation to the destination.
5236 : *
5237 : * Pass permanent as true for permanent relations and false for
5238 : * unlogged relations. Currently this API is not supported for
5239 : * temporary relations.
5240 : * --------------------------------------------------------------------
5241 : */
5242 : void
5243 109800 : CreateAndCopyRelationData(RelFileLocator src_rlocator,
5244 : RelFileLocator dst_rlocator, bool permanent)
5245 : {
5246 : char relpersistence;
5247 : SMgrRelation src_rel;
5248 : SMgrRelation dst_rel;
5249 :
5250 : /* Set the relpersistence. */
5251 109800 : relpersistence = permanent ?
5252 : RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED;
5253 :
5254 109800 : src_rel = smgropen(src_rlocator, INVALID_PROC_NUMBER);
5255 109800 : dst_rel = smgropen(dst_rlocator, INVALID_PROC_NUMBER);
5256 :
5257 : /*
5258 : * Create and copy all forks of the relation. During create database we
5259 : * have a separate cleanup mechanism which deletes the complete database
5260 : * directory. Therefore, each individual relation doesn't need to be
5261 : * registered for cleanup.
5262 : */
5263 109800 : RelationCreateStorage(dst_rlocator, relpersistence, false);
5264 :
5265 : /* copy main fork. */
5266 109800 : RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, MAIN_FORKNUM,
5267 : permanent);
5268 :
5269 : /* copy those extra forks that exist */
5270 109796 : for (ForkNumber forkNum = MAIN_FORKNUM + 1;
5271 439184 : forkNum <= MAX_FORKNUM; forkNum++)
5272 : {
5273 329388 : if (smgrexists(src_rel, forkNum))
5274 : {
5275 36268 : smgrcreate(dst_rel, forkNum, false);
5276 :
5277 : /*
5278 : * WAL log creation if the relation is persistent, or this is the
5279 : * init fork of an unlogged relation.
5280 : */
5281 36268 : if (permanent || forkNum == INIT_FORKNUM)
5282 36268 : log_smgrcreate(&dst_rlocator, forkNum);
5283 :
5284 : /* Copy a fork's data, block by block. */
5285 36268 : RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, forkNum,
5286 : permanent);
5287 : }
5288 : }
5289 109796 : }
5290 :
5291 : /* ---------------------------------------------------------------------
5292 : * FlushDatabaseBuffers
5293 : *
5294 : * This function writes all dirty pages of a database out to disk
5295 : * (or more accurately, out to kernel disk buffers), ensuring that the
5296 : * kernel has an up-to-date view of the database.
5297 : *
5298 : * Generally, the caller should be holding an appropriate lock to ensure
5299 : * no other backend is active in the target database; otherwise more
5300 : * pages could get dirtied.
5301 : *
5302 : * Note we don't worry about flushing any pages of temporary relations.
5303 : * It's assumed these wouldn't be interesting.
5304 : * --------------------------------------------------------------------
5305 : */
5306 : void
5307 8 : FlushDatabaseBuffers(Oid dbid)
5308 : {
5309 : int i;
5310 : BufferDesc *bufHdr;
5311 :
5312 1032 : for (i = 0; i < NBuffers; i++)
5313 : {
5314 : uint32 buf_state;
5315 :
5316 1024 : bufHdr = GetBufferDescriptor(i);
5317 :
5318 : /*
5319 : * As in DropRelationBuffers, an unlocked precheck should be safe and
5320 : * saves some cycles.
5321 : */
5322 1024 : if (bufHdr->tag.dbOid != dbid)
5323 766 : continue;
5324 :
5325 : /* Make sure we can handle the pin */
5326 258 : ReservePrivateRefCountEntry();
5327 258 : ResourceOwnerEnlarge(CurrentResourceOwner);
5328 :
5329 258 : buf_state = LockBufHdr(bufHdr);
5330 258 : if (bufHdr->tag.dbOid == dbid &&
5331 258 : (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
5332 : {
5333 20 : PinBuffer_Locked(bufHdr);
5334 20 : LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
5335 20 : FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
5336 20 : LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
5337 20 : UnpinBuffer(bufHdr);
5338 : }
5339 : else
5340 238 : UnlockBufHdr(bufHdr, buf_state);
5341 : }
5342 8 : }
5343 :
5344 : /*
5345 : * Flush a previously locked (in share or exclusive mode) and pinned buffer
5346 : * to the OS.
5347 : */
5348 : void
5349 158 : FlushOneBuffer(Buffer buffer)
5350 : {
5351 : BufferDesc *bufHdr;
5352 :
5353 : /* currently not needed, but no fundamental reason not to support */
5354 : Assert(!BufferIsLocal(buffer));
5355 :
5356 : Assert(BufferIsPinned(buffer));
5357 :
5358 158 : bufHdr = GetBufferDescriptor(buffer - 1);
5359 :
5360 : Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
5361 :
5362 158 : FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
5363 158 : }
5364 :
5365 : /*
5366 : * ReleaseBuffer -- release the pin on a buffer
5367 : */
5368 : void
5369 122978012 : ReleaseBuffer(Buffer buffer)
5370 : {
5371 122978012 : if (!BufferIsValid(buffer))
5372 0 : elog(ERROR, "bad buffer ID: %d", buffer);
5373 :
5374 122978012 : if (BufferIsLocal(buffer))
5375 3207820 : UnpinLocalBuffer(buffer);
5376 : else
5377 119770192 : UnpinBuffer(GetBufferDescriptor(buffer - 1));
5378 122978012 : }
5379 :
5380 : /*
5381 : * UnlockReleaseBuffer -- release the content lock and pin on a buffer
5382 : *
5383 : * This is just a shorthand for a common combination.
5384 : */
5385 : void
5386 37735016 : UnlockReleaseBuffer(Buffer buffer)
5387 : {
5388 37735016 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5389 37735016 : ReleaseBuffer(buffer);
5390 37735016 : }
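/*
 * Illustrative sketch (not part of the original file): the common combination
 * that UnlockReleaseBuffer() abbreviates.  Read and pin a block, hold the
 * content lock while looking at the page, then drop lock and pin together.
 * The helper name is hypothetical.
 */
#ifdef BUFMGR_EXAMPLES
static OffsetNumber
example_read_page_maxoff(Relation rel, BlockNumber blkno)
{
	Buffer		buf;
	OffsetNumber maxoff;

	buf = ReadBuffer(rel, blkno);			/* pins the buffer */
	LockBuffer(buf, BUFFER_LOCK_SHARE);		/* content lock while reading */
	maxoff = PageGetMaxOffsetNumber(BufferGetPage(buf));
	UnlockReleaseBuffer(buf);				/* unlock and unpin in one call */

	return maxoff;
}
#endif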
5391 :
5392 : /*
5393 : * IncrBufferRefCount
5394 : * Increment the pin count on a buffer that we have *already* pinned
5395 : * at least once.
5396 : *
5397 : * This function cannot be used on a buffer we do not have pinned,
5398 : * because it doesn't change the shared buffer state.
5399 : */
5400 : void
5401 22878548 : IncrBufferRefCount(Buffer buffer)
5402 : {
5403 : Assert(BufferIsPinned(buffer));
5404 22878548 : ResourceOwnerEnlarge(CurrentResourceOwner);
5405 22878548 : if (BufferIsLocal(buffer))
5406 709056 : LocalRefCount[-buffer - 1]++;
5407 : else
5408 : {
5409 : PrivateRefCountEntry *ref;
5410 :
5411 22169492 : ref = GetPrivateRefCountEntry(buffer, true);
5412 : Assert(ref != NULL);
5413 22169492 : ref->refcount++;
5414 : }
5415 22878548 : ResourceOwnerRememberBuffer(CurrentResourceOwner, buffer);
5416 22878548 : }
5417 :
5418 : /*
5419 : * MarkBufferDirtyHint
5420 : *
5421 : * Mark a buffer dirty for non-critical changes.
5422 : *
5423 : * This is essentially the same as MarkBufferDirty, except:
5424 : *
5425 : * 1. The caller does not write WAL; so if checksums are enabled, we may need
5426 : * to write an XLOG_FPI_FOR_HINT WAL record to protect against torn pages.
5427 : * 2. The caller might hold the buffer's content lock in share mode rather
5428 : * than exclusive mode.
5429 : * 3. This function does not guarantee that the buffer is always marked dirty
5430 : * (due to a race condition), so it cannot be used for important changes.
5431 : */
5432 : void
5433 20898962 : MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
5434 : {
5435 : BufferDesc *bufHdr;
5436 20898962 : Page page = BufferGetPage(buffer);
5437 :
5438 20898962 : if (!BufferIsValid(buffer))
5439 0 : elog(ERROR, "bad buffer ID: %d", buffer);
5440 :
5441 20898962 : if (BufferIsLocal(buffer))
5442 : {
5443 1264382 : MarkLocalBufferDirty(buffer);
5444 1264382 : return;
5445 : }
5446 :
5447 19634580 : bufHdr = GetBufferDescriptor(buffer - 1);
5448 :
5449 : Assert(GetPrivateRefCount(buffer) > 0);
5450 : /* here, either share or exclusive lock is OK */
5451 : Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
5452 :
5453 : /*
5454 : * This routine might get called many times on the same page, if we are
5455 : * making the first scan after commit of an xact that added/deleted many
5456 : * tuples. So, be as quick as we can if the buffer is already dirty. We
5457 : * do this by not acquiring spinlock if it looks like the status bits are
5458 : * already set. Since we make this test unlocked, there's a chance we
5459 : * might fail to notice that the flags have just been cleared, and failed
5460 : * to reset them, due to memory-ordering issues. But since this function
5461 : * is only intended to be used in cases where failing to write out the
5462 : * data would be harmless anyway, it doesn't really matter.
5463 : */
5464 19634580 : if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
5465 : (BM_DIRTY | BM_JUST_DIRTIED))
5466 : {
5467 1876156 : XLogRecPtr lsn = InvalidXLogRecPtr;
5468 1876156 : bool dirtied = false;
5469 1876156 : bool delayChkptFlags = false;
5470 : uint32 buf_state;
5471 :
5472 : /*
5473 : * If we need to protect hint bit updates from torn writes, WAL-log a
5474 : * full page image of the page. This full page image is only necessary
5475 : * if the hint bit update is the first change to the page since the
5476 : * last checkpoint.
5477 : *
5478 : * We don't check full_page_writes here because that logic is included
5479 : * when we call XLogInsert() since the value changes dynamically.
5480 : */
5481 3750122 : if (XLogHintBitIsNeeded() &&
5482 1873966 : (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
5483 : {
5484 : /*
5485 : * If we must not write WAL, due to a relfilelocator-specific
5486 : * condition or being in recovery, don't dirty the page. We can
5487 : * still set the hint, we just don't dirty the page as a result, so
5488 : * the hint is lost when the page is evicted or at shutdown.
5489 : *
5490 : * See src/backend/storage/page/README for longer discussion.
5491 : */
5492 2000418 : if (RecoveryInProgress() ||
5493 126516 : RelFileLocatorSkippingWAL(BufTagGetRelFileLocator(&bufHdr->tag)))
5494 1753396 : return;
5495 :
5496 : /*
5497 : * If the block is already dirty because we either made a change
5498 : * or set a hint already, then we don't need to write a full page
5499 : * image. Note that aggressive cleaning of blocks dirtied by hint
5500 : * bit setting would increase the call rate. Bulk setting of hint
5501 : * bits would reduce the call rate...
5502 : *
5503 : * We must issue the WAL record before we mark the buffer dirty.
5504 : * Otherwise we might write the page before we write the WAL. That
5505 : * causes a race condition, since a checkpoint might occur between
5506 : * writing the WAL record and marking the buffer dirty. We solve
5507 : * that with a kluge, but one that is already in use during
5508 : * transaction commit to prevent race conditions. Basically, we
5509 : * simply prevent the checkpoint WAL record from being written
5510 : * until we have marked the buffer dirty. We don't start the
5511 : * checkpoint flush until we have marked dirty, so our checkpoint
5512 : * must flush the change to disk successfully or the checkpoint
5513 : * never gets written, so crash recovery will fix things up.
5514 : *
5515 : * It's possible we may enter here without an xid, so it is
5516 : * essential that CreateCheckPoint waits for virtual transactions
5517 : * rather than full transactionids.
5518 : */
5519 : Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
5520 120506 : MyProc->delayChkptFlags |= DELAY_CHKPT_START;
5521 120506 : delayChkptFlags = true;
5522 120506 : lsn = XLogSaveBufferForHint(buffer, buffer_std);
5523 : }
5524 :
5525 122760 : buf_state = LockBufHdr(bufHdr);
5526 :
5527 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5528 :
5529 122760 : if (!(buf_state & BM_DIRTY))
5530 : {
5531 122698 : dirtied = true; /* Means "will be dirtied by this action" */
5532 :
5533 : /*
5534 : * Set the page LSN if we wrote a backup block. We aren't supposed
5535 : * to set this when only holding a share lock but as long as we
5536 : * serialise it somehow we're OK. We choose to set LSN while
5537 : * holding the buffer header lock, which causes any reader of an
5538 : * LSN who holds only a share lock to also obtain a buffer header
5539 : * lock before using PageGetLSN(), which is enforced in
5540 : * BufferGetLSNAtomic().
5541 : *
5542 : * If checksums are enabled, you might think we should reset the
5543 : * checksum here. That will happen when the page is written
5544 : * sometime later in this checkpoint cycle.
5545 : */
5546 122698 : if (!XLogRecPtrIsInvalid(lsn))
5547 65400 : PageSetLSN(page, lsn);
5548 : }
5549 :
5550 122760 : buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
5551 122760 : UnlockBufHdr(bufHdr, buf_state);
5552 :
5553 122760 : if (delayChkptFlags)
5554 120506 : MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
5555 :
5556 122760 : if (dirtied)
5557 : {
5558 122698 : pgBufferUsage.shared_blks_dirtied++;
5559 122698 : if (VacuumCostActive)
5560 3530 : VacuumCostBalance += VacuumCostPageDirty;
5561 : }
5562 : }
5563 : }
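/*
 * Illustrative sketch (not part of the original file): the calling pattern
 * MarkBufferDirtyHint() is meant for.  A non-critical page change is made
 * while holding only a share lock and without the caller writing WAL; losing
 * the change is acceptable.  The helper name and the elided hint update are
 * hypothetical; real users include tuple infomask hint bits.
 */
#ifdef BUFMGR_EXAMPLES
static void
example_set_page_hint(Buffer buf)
{
	LockBuffer(buf, BUFFER_LOCK_SHARE);

	/* ... set some recoverable, non-critical bit on the page here ... */

	/* we write no WAL; MarkBufferDirtyHint may log an FPI if checksums are on */
	MarkBufferDirtyHint(buf, true);		/* true = standard page layout */

	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
}
#endif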
5564 :
5565 : /*
5566 : * Release buffer content locks for shared buffers.
5567 : *
5568 : * Used to clean up after errors.
5569 : *
5570 : * Currently, we can expect that lwlock.c's LWLockReleaseAll() took care
5571 : * of releasing buffer content locks per se; the only thing we need to deal
5572 : * with here is clearing any PIN_COUNT request that was in progress.
5573 : */
5574 : void
5575 100104 : UnlockBuffers(void)
5576 : {
5577 100104 : BufferDesc *buf = PinCountWaitBuf;
5578 :
5579 100104 : if (buf)
5580 : {
5581 : uint32 buf_state;
5582 :
5583 0 : buf_state = LockBufHdr(buf);
5584 :
5585 : /*
5586 : * Don't complain if the flag bit is not set; it could have been reset but we
5587 : * got a cancel/die interrupt before getting the signal.
5588 : */
5589 0 : if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
5590 0 : buf->wait_backend_pgprocno == MyProcNumber)
5591 0 : buf_state &= ~BM_PIN_COUNT_WAITER;
5592 :
5593 0 : UnlockBufHdr(buf, buf_state);
5594 :
5595 0 : PinCountWaitBuf = NULL;
5596 : }
5597 100104 : }
5598 :
5599 : /*
5600 : * Acquire or release the content_lock for the buffer.
5601 : */
5602 : void
5603 344297134 : LockBuffer(Buffer buffer, int mode)
5604 : {
5605 : BufferDesc *buf;
5606 :
5607 : Assert(BufferIsPinned(buffer));
5608 344297134 : if (BufferIsLocal(buffer))
5609 19757476 : return; /* local buffers need no lock */
5610 :
5611 324539658 : buf = GetBufferDescriptor(buffer - 1);
5612 :
5613 324539658 : if (mode == BUFFER_LOCK_UNLOCK)
5614 164185762 : LWLockRelease(BufferDescriptorGetContentLock(buf));
5615 160353896 : else if (mode == BUFFER_LOCK_SHARE)
5616 114009954 : LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_SHARED);
5617 46343942 : else if (mode == BUFFER_LOCK_EXCLUSIVE)
5618 46343942 : LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_EXCLUSIVE);
5619 : else
5620 0 : elog(ERROR, "unrecognized buffer lock mode: %d", mode);
5621 : }
5622 :
5623 : /*
5624 : * Acquire the content_lock for the buffer, but only if we don't have to wait.
5625 : *
5626 : * This assumes the caller wants BUFFER_LOCK_EXCLUSIVE mode.
5627 : */
5628 : bool
5629 3162978 : ConditionalLockBuffer(Buffer buffer)
5630 : {
5631 : BufferDesc *buf;
5632 :
5633 : Assert(BufferIsPinned(buffer));
5634 3162978 : if (BufferIsLocal(buffer))
5635 129182 : return true; /* act as though we got it */
5636 :
5637 3033796 : buf = GetBufferDescriptor(buffer - 1);
5638 :
5639 3033796 : return LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf),
5640 : LW_EXCLUSIVE);
5641 : }
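/*
 * Illustrative sketch (not part of the original file): the opportunistic
 * locking pattern ConditionalLockBuffer() enables.  Try for the exclusive
 * content lock and skip the page if someone else holds it, rather than
 * waiting.  The helper name and the elided page work are hypothetical.
 */
#ifdef BUFMGR_EXAMPLES
static bool
example_try_exclusive_work(Buffer buf)
{
	if (!ConditionalLockBuffer(buf))
		return false;			/* contended; the caller can retry later */

	/* ... modify the page and MarkBufferDirty(buf) as appropriate ... */

	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
	return true;
}
#endif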
5642 :
5643 : /*
5644 : * Verify that this backend is pinning the buffer exactly once.
5645 : *
5646 : * NOTE: Like in BufferIsPinned(), what we check here is that *this* backend
5647 : * holds a pin on the buffer. We do not care whether some other backend does.
5648 : */
5649 : void
5650 4402562 : CheckBufferIsPinnedOnce(Buffer buffer)
5651 : {
5652 4402562 : if (BufferIsLocal(buffer))
5653 : {
5654 1578 : if (LocalRefCount[-buffer - 1] != 1)
5655 0 : elog(ERROR, "incorrect local pin count: %d",
5656 : LocalRefCount[-buffer - 1]);
5657 : }
5658 : else
5659 : {
5660 4400984 : if (GetPrivateRefCount(buffer) != 1)
5661 0 : elog(ERROR, "incorrect local pin count: %d",
5662 : GetPrivateRefCount(buffer));
5663 : }
5664 4402562 : }
5665 :
5666 : /*
5667 : * LockBufferForCleanup - lock a buffer in preparation for deleting items
5668 : *
5669 : * Items may be deleted from a disk page only when the caller (a) holds an
5670 : * exclusive lock on the buffer and (b) has observed that no other backend
5671 : * holds a pin on the buffer. If there is a pin, then the other backend
5672 : * might have a pointer into the buffer (for example, a heapscan reference
5673 : * to an item --- see README for more details). It's OK if a pin is added
5674 : * after the cleanup starts, however; the newly-arrived backend will be
5675 : * unable to look at the page until we release the exclusive lock.
5676 : *
5677 : * To implement this protocol, a would-be deleter must pin the buffer and
5678 : * then call LockBufferForCleanup(). LockBufferForCleanup() is similar to
5679 : * LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE), except that it loops until
5680 : * it has successfully observed pin count = 1.
5681 : */
5682 : void
5683 42390 : LockBufferForCleanup(Buffer buffer)
5684 : {
5685 : BufferDesc *bufHdr;
5686 42390 : TimestampTz waitStart = 0;
5687 42390 : bool waiting = false;
5688 42390 : bool logged_recovery_conflict = false;
5689 :
5690 : Assert(BufferIsPinned(buffer));
5691 : Assert(PinCountWaitBuf == NULL);
5692 :
5693 42390 : CheckBufferIsPinnedOnce(buffer);
5694 :
5695 : /*
5696 : * We do not yet need to worry about in-progress AIOs holding a pin,
5697 : * as we, so far, only support doing reads via AIO and this function can
5698 : * only be called once the buffer is valid (i.e. no read can be in
5699 : * flight).
5700 : */
5701 :
5702 : /* Nobody else to wait for */
5703 42390 : if (BufferIsLocal(buffer))
5704 32 : return;
5705 :
5706 42358 : bufHdr = GetBufferDescriptor(buffer - 1);
5707 :
5708 : for (;;)
5709 160 : {
5710 : uint32 buf_state;
5711 :
5712 : /* Try to acquire lock */
5713 42518 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
5714 42518 : buf_state = LockBufHdr(bufHdr);
5715 :
5716 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5717 42518 : if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
5718 : {
5719 : /* Successfully acquired exclusive lock with pincount 1 */
5720 42358 : UnlockBufHdr(bufHdr, buf_state);
5721 :
5722 : /*
5723 : * Emit the log message if recovery conflict on buffer pin was
5724 : * resolved but the startup process waited longer than
5725 : * deadlock_timeout for it.
5726 : */
5727 42358 : if (logged_recovery_conflict)
5728 4 : LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
5729 : waitStart, GetCurrentTimestamp(),
5730 : NULL, false);
5731 :
5732 42358 : if (waiting)
5733 : {
5734 : /* reset ps display to remove the suffix if we added one */
5735 4 : set_ps_display_remove_suffix();
5736 4 : waiting = false;
5737 : }
5738 42358 : return;
5739 : }
5740 : /* Failed, so mark myself as waiting for pincount 1 */
5741 160 : if (buf_state & BM_PIN_COUNT_WAITER)
5742 : {
5743 0 : UnlockBufHdr(bufHdr, buf_state);
5744 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5745 0 : elog(ERROR, "multiple backends attempting to wait for pincount 1");
5746 : }
5747 160 : bufHdr->wait_backend_pgprocno = MyProcNumber;
5748 160 : PinCountWaitBuf = bufHdr;
5749 160 : buf_state |= BM_PIN_COUNT_WAITER;
5750 160 : UnlockBufHdr(bufHdr, buf_state);
5751 160 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5752 :
5753 : /* Wait to be signaled by UnpinBuffer() */
5754 160 : if (InHotStandby)
5755 : {
5756 20 : if (!waiting)
5757 : {
5758 : /* adjust the process title to indicate that it's waiting */
5759 4 : set_ps_display_suffix("waiting");
5760 4 : waiting = true;
5761 : }
5762 :
5763 : /*
5764 : * Emit the log message if the startup process is waiting longer
5765 : * than deadlock_timeout for recovery conflict on buffer pin.
5766 : *
5767 : * Skip this the first time through, because the startup process has
5768 : * not started waiting yet in this case. So, the wait start
5769 : * timestamp is set after this logic.
5770 : */
5771 20 : if (waitStart != 0 && !logged_recovery_conflict)
5772 : {
5773 8 : TimestampTz now = GetCurrentTimestamp();
5774 :
5775 8 : if (TimestampDifferenceExceeds(waitStart, now,
5776 : DeadlockTimeout))
5777 : {
5778 4 : LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
5779 : waitStart, now, NULL, true);
5780 4 : logged_recovery_conflict = true;
5781 : }
5782 : }
5783 :
5784 : /*
5785 : * Set the wait start timestamp if logging is enabled and first
5786 : * time through.
5787 : */
5788 20 : if (log_recovery_conflict_waits && waitStart == 0)
5789 4 : waitStart = GetCurrentTimestamp();
5790 :
5791 : /* Publish the bufid that Startup process waits on */
5792 20 : SetStartupBufferPinWaitBufId(buffer - 1);
5793 : /* Set alarm and then wait to be signaled by UnpinBuffer() */
5794 20 : ResolveRecoveryConflictWithBufferPin();
5795 : /* Reset the published bufid */
5796 20 : SetStartupBufferPinWaitBufId(-1);
5797 : }
5798 : else
5799 140 : ProcWaitForSignal(WAIT_EVENT_BUFFER_PIN);
5800 :
5801 : /*
5802 : * Remove flag marking us as waiter. Normally this will not be set
5803 : * anymore, but ProcWaitForSignal() can return for other signals as
5804 : * well. We take care to only reset the flag if we're the waiter, as
5805 : * theoretically another backend could have started waiting. That's
5806 : * impossible with the current usages due to table level locking, but
5807 : * better be safe.
5808 : */
5809 160 : buf_state = LockBufHdr(bufHdr);
5810 160 : if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
5811 16 : bufHdr->wait_backend_pgprocno == MyProcNumber)
5812 16 : buf_state &= ~BM_PIN_COUNT_WAITER;
5813 160 : UnlockBufHdr(bufHdr, buf_state);
5814 :
5815 160 : PinCountWaitBuf = NULL;
5816 : /* Loop back and try again */
5817 : }
5818 : }
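/*
 * Illustrative sketch (not part of the original file): the protocol described
 * above, in miniature.  Pin the buffer first, then ask for the cleanup lock,
 * which returns only once this backend holds the sole pin plus the exclusive
 * content lock.  The helper name is hypothetical and the item deletion is
 * elided.
 */
#ifdef BUFMGR_EXAMPLES
static void
example_cleanup_page(Relation rel, BlockNumber blkno)
{
	Buffer		buf;

	buf = ReadBuffer(rel, blkno);		/* take our own pin first */
	LockBufferForCleanup(buf);			/* waits until the pin count is 1 */

	/* ... safe to delete items from / defragment the page here ... */

	UnlockReleaseBuffer(buf);
}
#endif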
5819 :
5820 : /*
5821 : * Check called from ProcessRecoveryConflictInterrupts() when Startup process
5822 : * requests cancellation of all pin holders that are blocking it.
5823 : */
5824 : bool
5825 6 : HoldingBufferPinThatDelaysRecovery(void)
5826 : {
5827 6 : int bufid = GetStartupBufferPinWaitBufId();
5828 :
5829 : /*
5830 : * If we get woken slowly then it's possible that the Startup process was
5831 : * already woken by other backends before we got here. It's also possible
5832 : * that we get here via multiple interrupts or interrupts at inappropriate
5833 : * times, so make sure we do nothing if the bufid is not set.
5834 : */
5835 6 : if (bufid < 0)
5836 2 : return false;
5837 :
5838 4 : if (GetPrivateRefCount(bufid + 1) > 0)
5839 4 : return true;
5840 :
5841 0 : return false;
5842 : }
5843 :
5844 : /*
5845 : * ConditionalLockBufferForCleanup - as above, but don't wait to get the lock
5846 : *
5847 : * We won't loop, but just check once to see if the pin count is OK. If
5848 : * not, return false with no lock held.
5849 : */
5850 : bool
5851 698002 : ConditionalLockBufferForCleanup(Buffer buffer)
5852 : {
5853 : BufferDesc *bufHdr;
5854 : uint32 buf_state,
5855 : refcount;
5856 :
5857 : Assert(BufferIsValid(buffer));
5858 :
5859 : /* see AIO related comment in LockBufferForCleanup() */
5860 :
5861 698002 : if (BufferIsLocal(buffer))
5862 : {
5863 1600 : refcount = LocalRefCount[-buffer - 1];
5864 : /* There should be exactly one pin */
5865 : Assert(refcount > 0);
5866 1600 : if (refcount != 1)
5867 42 : return false;
5868 : /* Nobody else to wait for */
5869 1558 : return true;
5870 : }
5871 :
5872 : /* There should be exactly one local pin */
5873 696402 : refcount = GetPrivateRefCount(buffer);
5874 : Assert(refcount);
5875 696402 : if (refcount != 1)
5876 416 : return false;
5877 :
5878 : /* Try to acquire lock */
5879 695986 : if (!ConditionalLockBuffer(buffer))
5880 66 : return false;
5881 :
5882 695920 : bufHdr = GetBufferDescriptor(buffer - 1);
5883 695920 : buf_state = LockBufHdr(bufHdr);
5884 695920 : refcount = BUF_STATE_GET_REFCOUNT(buf_state);
5885 :
5886 : Assert(refcount > 0);
5887 695920 : if (refcount == 1)
5888 : {
5889 : /* Successfully acquired exclusive lock with pincount 1 */
5890 695398 : UnlockBufHdr(bufHdr, buf_state);
5891 695398 : return true;
5892 : }
5893 :
5894 : /* Failed, so release the lock */
5895 522 : UnlockBufHdr(bufHdr, buf_state);
5896 522 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5897 522 : return false;
5898 : }
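/*
 * Illustrative sketch (not part of the original file): the non-blocking
 * variant of the cleanup pattern, as a lazy-vacuum-style caller might use it.
 * If the cleanup lock can't be had immediately, skip the page instead of
 * waiting.  The helper name is hypothetical.
 */
#ifdef BUFMGR_EXAMPLES
static bool
example_try_cleanup_page(Buffer buf)
{
	if (!ConditionalLockBufferForCleanup(buf))
	{
		ReleaseBuffer(buf);		/* give up our pin and move on */
		return false;
	}

	/* ... cleanup work on the page ... */

	UnlockReleaseBuffer(buf);
	return true;
}
#endif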
5899 :
5900 : /*
5901 : * IsBufferCleanupOK - as above, but we already have the lock
5902 : *
5903 : * Check whether it's OK to perform cleanup on a buffer we've already
5904 : * locked. If we observe that the pin count is 1, our exclusive lock
5905 : * happens to be a cleanup lock, and we can proceed with anything that
5906 : * would have been allowable had we sought a cleanup lock originally.
5907 : */
5908 : bool
5909 4072 : IsBufferCleanupOK(Buffer buffer)
5910 : {
5911 : BufferDesc *bufHdr;
5912 : uint32 buf_state;
5913 :
5914 : Assert(BufferIsValid(buffer));
5915 :
5916 : /* see AIO related comment in LockBufferForCleanup() */
5917 :
5918 4072 : if (BufferIsLocal(buffer))
5919 : {
5920 : /* There should be exactly one pin */
5921 0 : if (LocalRefCount[-buffer - 1] != 1)
5922 0 : return false;
5923 : /* Nobody else to wait for */
5924 0 : return true;
5925 : }
5926 :
5927 : /* There should be exactly one local pin */
5928 4072 : if (GetPrivateRefCount(buffer) != 1)
5929 0 : return false;
5930 :
5931 4072 : bufHdr = GetBufferDescriptor(buffer - 1);
5932 :
5933 : /* caller must hold exclusive lock on buffer */
5934 : Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
5935 : LW_EXCLUSIVE));
5936 :
5937 4072 : buf_state = LockBufHdr(bufHdr);
5938 :
5939 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5940 4072 : if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
5941 : {
5942 : /* pincount is OK. */
5943 4072 : UnlockBufHdr(bufHdr, buf_state);
5944 4072 : return true;
5945 : }
5946 :
5947 0 : UnlockBufHdr(bufHdr, buf_state);
5948 0 : return false;
5949 : }
5950 :
5951 :
5952 : /*
5953 : * Functions for buffer I/O handling
5954 : *
5955 : * Also note that these are used only for shared buffers, not local ones.
5956 : */
5957 :
5958 : /*
5959 : * WaitIO -- Block until the IO_IN_PROGRESS flag on 'buf' is cleared.
5960 : */
5961 : static void
5962 13160 : WaitIO(BufferDesc *buf)
5963 : {
5964 13160 : ConditionVariable *cv = BufferDescriptorGetIOCV(buf);
5965 :
5966 13160 : ConditionVariablePrepareToSleep(cv);
5967 : for (;;)
5968 13078 : {
5969 : uint32 buf_state;
5970 : PgAioWaitRef iow;
5971 :
5972 : /*
5973 : * It may not be necessary to acquire the spinlock to check the flag
5974 : * here, but since this test is essential for correctness, we'd better
5975 : * play it safe.
5976 : */
5977 26238 : buf_state = LockBufHdr(buf);
5978 :
5979 : /*
5980 : * Copy the wait reference while holding the spinlock. This protects
5981 : * against a concurrent TerminateBufferIO() in another backend from
5982 : * clearing the wref while it's being read.
5983 : */
5984 26238 : iow = buf->io_wref;
5985 26238 : UnlockBufHdr(buf, buf_state);
5986 :
5987 : /* no IO in progress, we don't need to wait */
5988 26238 : if (!(buf_state & BM_IO_IN_PROGRESS))
5989 13160 : break;
5990 :
5991 : /*
5992 : * The buffer has asynchronous IO in progress, wait for it to
5993 : * complete.
5994 : */
5995 13078 : if (pgaio_wref_valid(&iow))
5996 : {
5997 10628 : pgaio_wref_wait(&iow);
5998 :
5999 : /*
6000 : * The AIO subsystem internally uses condition variables and thus
6001 : * might remove this backend from the BufferDesc's CV. While that
6002 : * wouldn't cause a correctness issue (the first CV sleep just
6003 : * immediately returns if not already registered), it seems worth
6004 : * avoiding unnecessary loop iterations, given that we take care
6005 : * to do so at the start of the function.
6006 : */
6007 10628 : ConditionVariablePrepareToSleep(cv);
6008 10628 : continue;
6009 : }
6010 :
6011 : /* wait on BufferDesc->cv, e.g. for concurrent synchronous IO */
6012 2450 : ConditionVariableSleep(cv, WAIT_EVENT_BUFFER_IO);
6013 : }
6014 13160 : ConditionVariableCancelSleep();
6015 13160 : }
6016 :
6017 : /*
6018 : * StartBufferIO: begin I/O on this buffer
6019 : * (Assumptions)
6020 : * My process is executing no IO on this buffer
6021 : * The buffer is Pinned
6022 : *
6023 : * In some scenarios multiple backends could attempt the same I/O operation
6024 : * concurrently. If someone else has already started I/O on this buffer then
6025 : * we will wait for completion of the IO using WaitIO().
6026 : *
6027 : * Input operations are only attempted on buffers that are not BM_VALID,
6028 : * and output operations only on buffers that are BM_VALID and BM_DIRTY,
6029 : * so we can always tell if the work is already done.
6030 : *
6031 : * Returns true if we successfully marked the buffer as I/O busy,
6032 : * false if someone else already did the work.
6033 : *
6034 : * If nowait is true, then we don't wait for an I/O to be finished by another
6035 : * backend. In that case, false indicates either that the I/O was already
6036 : * finished, or is still in progress. This is useful for callers that want to
6037 : * find out if they can perform the I/O as part of a larger operation, without
6038 : * waiting for the answer or distinguishing the reasons why not.
6039 : */
6040 : bool
6041 4852624 : StartBufferIO(BufferDesc *buf, bool forInput, bool nowait)
6042 : {
6043 : uint32 buf_state;
6044 :
6045 4852624 : ResourceOwnerEnlarge(CurrentResourceOwner);
6046 :
6047 : for (;;)
6048 : {
6049 4865782 : buf_state = LockBufHdr(buf);
6050 :
6051 4865782 : if (!(buf_state & BM_IO_IN_PROGRESS))
6052 4852616 : break;
6053 13166 : UnlockBufHdr(buf, buf_state);
6054 13166 : if (nowait)
6055 8 : return false;
6056 13158 : WaitIO(buf);
6057 : }
6058 :
6059 : /* Once we get here, there is definitely no I/O active on this buffer */
6060 :
6061 : /* Check if someone else already did the I/O */
6062 4852616 : if (forInput ? (buf_state & BM_VALID) : !(buf_state & BM_DIRTY))
6063 : {
6064 13836 : UnlockBufHdr(buf, buf_state);
6065 13836 : return false;
6066 : }
6067 :
6068 4838780 : buf_state |= BM_IO_IN_PROGRESS;
6069 4838780 : UnlockBufHdr(buf, buf_state);
6070 :
6071 4838780 : ResourceOwnerRememberBufferIO(CurrentResourceOwner,
6072 : BufferDescriptorGetBuffer(buf));
6073 :
6074 4838780 : return true;
6075 : }
6076 :
6077 : /*
6078 : * TerminateBufferIO: release a buffer we were doing I/O on
6079 : * (Assumptions)
6080 : * My process is executing IO for the buffer
6081 : * BM_IO_IN_PROGRESS bit is set for the buffer
6082 : * The buffer is Pinned
6083 : *
6084 : * If clear_dirty is true and BM_JUST_DIRTIED is not set, we clear the
6085 : * buffer's BM_DIRTY flag. This is appropriate when terminating a
6086 : * successful write. The check on BM_JUST_DIRTIED is necessary to avoid
6087 : * marking the buffer clean if it was re-dirtied while we were writing.
6088 : *
6089 : * set_flag_bits gets ORed into the buffer's flags. It must include
6090 : * BM_IO_ERROR in a failure case. For successful completion it could
6091 : * be 0, or BM_VALID if we just finished reading in the page.
6092 : *
6093 : * If forget_owner is true, we release the buffer I/O from the current
6094 : * resource owner. (forget_owner=false is used when the resource owner itself
6095 : * is being released)
6096 : */
6097 : void
6098 4564376 : TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits,
6099 : bool forget_owner, bool release_aio)
6100 : {
6101 : uint32 buf_state;
6102 :
6103 4564376 : buf_state = LockBufHdr(buf);
6104 :
6105 : Assert(buf_state & BM_IO_IN_PROGRESS);
6106 4564376 : buf_state &= ~BM_IO_IN_PROGRESS;
6107 :
6108 : /* Clear earlier errors, if this IO failed, it'll be marked again */
6109 4564376 : buf_state &= ~BM_IO_ERROR;
6110 :
6111 4564376 : if (clear_dirty && !(buf_state & BM_JUST_DIRTIED))
6112 1102510 : buf_state &= ~(BM_DIRTY | BM_CHECKPOINT_NEEDED);
6113 :
6114 4564376 : if (release_aio)
6115 : {
6116 : /* release ownership by the AIO subsystem */
6117 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
6118 2453646 : buf_state -= BUF_REFCOUNT_ONE;
6119 2453646 : pgaio_wref_clear(&buf->io_wref);
6120 : }
6121 :
6122 4564376 : buf_state |= set_flag_bits;
6123 4564376 : UnlockBufHdr(buf, buf_state);
6124 :
6125 4564376 : if (forget_owner)
6126 2110688 : ResourceOwnerForgetBufferIO(CurrentResourceOwner,
6127 : BufferDescriptorGetBuffer(buf));
6128 :
6129 4564376 : ConditionVariableBroadcast(BufferDescriptorGetIOCV(buf));
6130 :
6131 : /*
6132 : * Support LockBufferForCleanup()
6133 : *
6134 : * We may have just released the last pin other than the waiter's. In most
6135 : * cases, this backend holds another pin on the buffer. But, if, for
6136 : * example, this backend is completing an IO issued by another backend, it
6137 : * may be time to wake the waiter.
6138 : */
6139 4564376 : if (release_aio && (buf_state & BM_PIN_COUNT_WAITER))
6140 0 : WakePinCountWaiter(buf);
6141 4564376 : }
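/*
 * Illustrative sketch (not part of the original file): the I/O bracketing
 * protocol implemented by StartBufferIO()/TerminateBufferIO(), shaped like a
 * synchronous write of an already-pinned buffer.  The helper name is
 * hypothetical and the smgr write itself is elided.
 */
#ifdef BUFMGR_EXAMPLES
static void
example_sync_write(BufferDesc *buf_hdr)
{
	/* forInput = false (output); nowait = false (wait for concurrent I/O) */
	if (!StartBufferIO(buf_hdr, false, false))
		return;					/* already clean, or written by someone else */

	/* ... write the block out via smgr here ... */

	/* clear BM_DIRTY unless re-dirtied meanwhile; no error flag, no AIO */
	TerminateBufferIO(buf_hdr, true, 0, true, false);
}
#endif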
6142 :
6143 : /*
6144 : * AbortBufferIO: Clean up active buffer I/O after an error.
6145 : *
6146 : * All LWLocks we might have held have been released,
6147 : * but we haven't yet released buffer pins, so the buffer is still pinned.
6148 : *
6149 : * If I/O was in progress, we always set BM_IO_ERROR, even though it's
6150 : * possible the error condition wasn't related to the I/O.
6151 : *
6152 : * Note: this does not remove the buffer I/O from the resource owner.
6153 : * That's correct when we're releasing the whole resource owner, but
6154 : * beware if you use this in other contexts.
6155 : */
6156 : static void
6157 30 : AbortBufferIO(Buffer buffer)
6158 : {
6159 30 : BufferDesc *buf_hdr = GetBufferDescriptor(buffer - 1);
6160 : uint32 buf_state;
6161 :
6162 30 : buf_state = LockBufHdr(buf_hdr);
6163 : Assert(buf_state & (BM_IO_IN_PROGRESS | BM_TAG_VALID));
6164 :
6165 30 : if (!(buf_state & BM_VALID))
6166 : {
6167 : Assert(!(buf_state & BM_DIRTY));
6168 30 : UnlockBufHdr(buf_hdr, buf_state);
6169 : }
6170 : else
6171 : {
6172 : Assert(buf_state & BM_DIRTY);
6173 0 : UnlockBufHdr(buf_hdr, buf_state);
6174 :
6175 : /* Issue notice if this is not the first failure... */
6176 0 : if (buf_state & BM_IO_ERROR)
6177 : {
6178 : /* Buffer is pinned, so we can read tag without spinlock */
6179 0 : ereport(WARNING,
6180 : (errcode(ERRCODE_IO_ERROR),
6181 : errmsg("could not write block %u of %s",
6182 : buf_hdr->tag.blockNum,
6183 : relpathperm(BufTagGetRelFileLocator(&buf_hdr->tag),
6184 : BufTagGetForkNum(&buf_hdr->tag)).str),
6185 : errdetail("Multiple failures --- write error might be permanent.")));
6186 : }
6187 : }
6188 :
6189 30 : TerminateBufferIO(buf_hdr, false, BM_IO_ERROR, false, false);
6190 30 : }
6191 :
6192 : /*
6193 : * Error context callback for errors occurring during shared buffer writes.
6194 : */
6195 : static void
6196 94 : shared_buffer_write_error_callback(void *arg)
6197 : {
6198 94 : BufferDesc *bufHdr = (BufferDesc *) arg;
6199 :
6200 : /* Buffer is pinned, so we can read the tag without locking the spinlock */
6201 94 : if (bufHdr != NULL)
6202 188 : errcontext("writing block %u of relation %s",
6203 : bufHdr->tag.blockNum,
6204 94 : relpathperm(BufTagGetRelFileLocator(&bufHdr->tag),
6205 : BufTagGetForkNum(&bufHdr->tag)).str);
6206 94 : }
6207 :
6208 : /*
6209 : * Error context callback for errors occurring during local buffer writes.
6210 : */
6211 : static void
6212 0 : local_buffer_write_error_callback(void *arg)
6213 : {
6214 0 : BufferDesc *bufHdr = (BufferDesc *) arg;
6215 :
6216 0 : if (bufHdr != NULL)
6217 0 : errcontext("writing block %u of relation %s",
6218 : bufHdr->tag.blockNum,
6219 0 : relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
6220 : MyProcNumber,
6221 : BufTagGetForkNum(&bufHdr->tag)).str);
6222 0 : }
6223 :
6224 : /*
6225 : * RelFileLocator qsort/bsearch comparator; see RelFileLocatorEquals.
6226 : */
6227 : static int
6228 18432594 : rlocator_comparator(const void *p1, const void *p2)
6229 : {
6230 18432594 : RelFileLocator n1 = *(const RelFileLocator *) p1;
6231 18432594 : RelFileLocator n2 = *(const RelFileLocator *) p2;
6232 :
6233 18432594 : if (n1.relNumber < n2.relNumber)
6234 18358522 : return -1;
6235 74072 : else if (n1.relNumber > n2.relNumber)
6236 70634 : return 1;
6237 :
6238 3438 : if (n1.dbOid < n2.dbOid)
6239 0 : return -1;
6240 3438 : else if (n1.dbOid > n2.dbOid)
6241 0 : return 1;
6242 :
6243 3438 : if (n1.spcOid < n2.spcOid)
6244 0 : return -1;
6245 3438 : else if (n1.spcOid > n2.spcOid)
6246 0 : return 1;
6247 : else
6248 3438 : return 0;
6249 : }
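/*
 * Illustrative sketch (not part of bufmgr.c): rlocator_comparator() follows
 * the libc qsort()/bsearch() protocol, which is how the bulk relation-drop
 * paths in this file use it.  The helper name and parameters below are
 * hypothetical.
 */
static bool
rlocator_in_set_example(RelFileLocator *locators, int nlocators,
						RelFileLocator key)
{
	/* sort once, then membership tests can use binary search */
	qsort(locators, nlocators, sizeof(RelFileLocator), rlocator_comparator);

	return bsearch(&key, locators, nlocators, sizeof(RelFileLocator),
				   rlocator_comparator) != NULL;
}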
6250 :
6251 : /*
6252 : * Lock buffer header - set BM_LOCKED in buffer state.
6253 : */
6254 : uint32
6255 71789650 : LockBufHdr(BufferDesc *desc)
6256 : {
6257 : SpinDelayStatus delayStatus;
6258 : uint32 old_buf_state;
6259 :
6260 : Assert(!BufferIsLocal(BufferDescriptorGetBuffer(desc)));
6261 :
6262 71789650 : init_local_spin_delay(&delayStatus);
6263 :
6264 : while (true)
6265 : {
6266 : /* set BM_LOCKED flag */
6267 71823654 : old_buf_state = pg_atomic_fetch_or_u32(&desc->state, BM_LOCKED);
6268 : /* if it wasn't set before we're OK */
6269 71823654 : if (!(old_buf_state & BM_LOCKED))
6270 71789650 : break;
6271 34004 : perform_spin_delay(&delayStatus);
6272 : }
6273 71789650 : finish_spin_delay(&delayStatus);
6274 71789650 : return old_buf_state | BM_LOCKED;
6275 : }
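/*
 * Illustrative sketch (not part of bufmgr.c): the usual shape of code built
 * on LockBufHdr()/UnlockBufHdr() -- inspect or adjust the state word while
 * the header spinlock is held, then store it back unlocked.  The helper
 * below is hypothetical.
 */
static bool
buf_is_dirty_example(BufferDesc *desc)
{
	uint32		buf_state = LockBufHdr(desc);
	bool		dirty = (buf_state & BM_DIRTY) != 0;

	UnlockBufHdr(desc, buf_state);

	return dirty;
}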
6276 :
6277 : /*
6278 : * Wait until the BM_LOCKED flag isn't set anymore and return the buffer's
6279 : * state at that point.
6280 : *
6281 : * Obviously the buffer could be locked by the time the value is returned, so
6282 : * this is primarily useful in CAS style loops.
6283 : */
6284 : static uint32
6285 2538 : WaitBufHdrUnlocked(BufferDesc *buf)
6286 : {
6287 : SpinDelayStatus delayStatus;
6288 : uint32 buf_state;
6289 :
6290 2538 : init_local_spin_delay(&delayStatus);
6291 :
6292 2538 : buf_state = pg_atomic_read_u32(&buf->state);
6293 :
6294 25208 : while (buf_state & BM_LOCKED)
6295 : {
6296 22670 : perform_spin_delay(&delayStatus);
6297 22670 : buf_state = pg_atomic_read_u32(&buf->state);
6298 : }
6299 :
6300 2538 : finish_spin_delay(&delayStatus);
6301 :
6302 2538 : return buf_state;
6303 : }
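/*
 * Illustrative sketch (not part of bufmgr.c) of the CAS-style loop the
 * comment above refers to, modeled on the pin/unpin fast paths elsewhere in
 * this file: retry the compare-and-swap, and only wait for the spinlock
 * holder when the lock bit is observed.  The flag-setting helper is
 * hypothetical.
 */
static void
set_state_flag_example(BufferDesc *desc, uint32 flag)
{
	uint32		old_buf_state = pg_atomic_read_u32(&desc->state);

	for (;;)
	{
		uint32		new_buf_state;

		if (old_buf_state & BM_LOCKED)
			old_buf_state = WaitBufHdrUnlocked(desc);

		new_buf_state = old_buf_state | flag;

		/* on failure, old_buf_state is refreshed with the current value */
		if (pg_atomic_compare_exchange_u32(&desc->state, &old_buf_state,
										   new_buf_state))
			break;
	}
}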
6304 :
6305 : /*
6306 : * BufferTag comparator.
6307 : */
6308 : static inline int
6309 0 : buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
6310 : {
6311 : int ret;
6312 : RelFileLocator rlocatora;
6313 : RelFileLocator rlocatorb;
6314 :
6315 0 : rlocatora = BufTagGetRelFileLocator(ba);
6316 0 : rlocatorb = BufTagGetRelFileLocator(bb);
6317 :
6318 0 : ret = rlocator_comparator(&rlocatora, &rlocatorb);
6319 :
6320 0 : if (ret != 0)
6321 0 : return ret;
6322 :
6323 0 : if (BufTagGetForkNum(ba) < BufTagGetForkNum(bb))
6324 0 : return -1;
6325 0 : if (BufTagGetForkNum(ba) > BufTagGetForkNum(bb))
6326 0 : return 1;
6327 :
6328 0 : if (ba->blockNum < bb->blockNum)
6329 0 : return -1;
6330 0 : if (ba->blockNum > bb->blockNum)
6331 0 : return 1;
6332 :
6333 0 : return 0;
6334 : }
6335 :
6336 : /*
6337 : * Comparator determining the writeout order in a checkpoint.
6338 : *
6339 : * It is important that tablespaces are compared first; the logic balancing
6340 : * writes between tablespaces relies on it.
6341 : */
6342 : static inline int
6343 5905652 : ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b)
6344 : {
6345 : /* compare tablespace */
6346 5905652 : if (a->tsId < b->tsId)
6347 13530 : return -1;
6348 5892122 : else if (a->tsId > b->tsId)
6349 42200 : return 1;
6350 : /* compare relation */
6351 5849922 : if (a->relNumber < b->relNumber)
6352 1623418 : return -1;
6353 4226504 : else if (a->relNumber > b->relNumber)
6354 1605484 : return 1;
6355 : /* compare fork */
6356 2621020 : else if (a->forkNum < b->forkNum)
6357 109458 : return -1;
6358 2511562 : else if (a->forkNum > b->forkNum)
6359 119260 : return 1;
6360 : /* compare block number */
6361 2392302 : else if (a->blockNum < b->blockNum)
6362 1168898 : return -1;
6363 1223404 : else if (a->blockNum > b->blockNum)
6364 1139424 : return 1;
6365 : /* equal page IDs are unlikely, but not impossible */
6366 83980 : return 0;
6367 : }
6368 :
6369 : /*
6370 : * Comparator for a Min-Heap over the per-tablespace checkpoint completion
6371 : * progress.
6372 : */
6373 : static int
6374 462014 : ts_ckpt_progress_comparator(Datum a, Datum b, void *arg)
6375 : {
6376 462014 : CkptTsStatus *sa = (CkptTsStatus *) a;
6377 462014 : CkptTsStatus *sb = (CkptTsStatus *) b;
6378 :
6379 : /* we want a min-heap, so return 1 when a < b */
6380 462014 : if (sa->progress < sb->progress)
6381 440626 : return 1;
6382 21388 : else if (sa->progress == sb->progress)
6383 1878 : return 0;
6384 : else
6385 19510 : return -1;
6386 : }
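/*
 * Illustrative sketch (not part of bufmgr.c): how this comparator drives the
 * per-tablespace min-heap during a checkpoint, assuming the lib/binaryheap.h
 * API included at the top of this file.  The array and helper name are
 * placeholders.
 */
static CkptTsStatus *
least_advanced_tablespace_example(CkptTsStatus *spcs, int nspcs)
{
	binaryheap *ts_heap = binaryheap_allocate(nspcs,
											  ts_ckpt_progress_comparator,
											  NULL);
	CkptTsStatus *result;

	for (int i = 0; i < nspcs; i++)
		binaryheap_add_unordered(ts_heap, PointerGetDatum(&spcs[i]));
	binaryheap_build(ts_heap);

	/* min-heap: the tablespace with the least progress sorts to the top */
	result = (CkptTsStatus *) DatumGetPointer(binaryheap_first(ts_heap));
	binaryheap_free(ts_heap);

	return result;
}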
6387 :
6388 : /*
6389 : * Initialize a writeback context, discarding potential previous state.
6390 : *
6391 : * *max_pending is a pointer instead of an immediate value, so the coalesce
6392 : * limits can easily be changed by the GUC mechanism, and so calling code does
6393 : * not have to check the current configuration. A value of 0 means that no
6394 : * writeback control will be performed.
6395 : */
6396 : void
6397 5234 : WritebackContextInit(WritebackContext *context, int *max_pending)
6398 : {
6399 : Assert(*max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
6400 :
6401 5234 : context->max_pending = max_pending;
6402 5234 : context->nr_pending = 0;
6403 5234 : }
6404 :
6405 : /*
6406 : * Add buffer to list of pending writeback requests.
6407 : */
6408 : void
6409 1090102 : ScheduleBufferTagForWriteback(WritebackContext *wb_context, IOContext io_context,
6410 : BufferTag *tag)
6411 : {
6412 : PendingWriteback *pending;
6413 :
6414 : /*
6415 : * As pg_flush_data() doesn't do anything with fsync disabled, there's no
6416 : * point in tracking in that case.
6417 : */
6418 1090102 : if (io_direct_flags & IO_DIRECT_DATA ||
6419 1089062 : !enableFsync)
6420 1090096 : return;
6421 :
6422 : /*
6423 : * Add buffer to the pending writeback array, unless writeback control is
6424 : * disabled.
6425 : */
6426 6 : if (*wb_context->max_pending > 0)
6427 : {
6428 : Assert(*wb_context->max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
6429 :
6430 0 : pending = &wb_context->pending_writebacks[wb_context->nr_pending++];
6431 :
6432 0 : pending->tag = *tag;
6433 : }
6434 :
6435 : /*
6436 : * Perform pending flushes if the writeback limit is exceeded. This
6437 : * includes the case where an item was previously added, but control
6438 : * is now disabled.
6439 : */
6440 6 : if (wb_context->nr_pending >= *wb_context->max_pending)
6441 6 : IssuePendingWritebacks(wb_context, io_context);
6442 : }
6443 :
6444 : #define ST_SORT sort_pending_writebacks
6445 : #define ST_ELEMENT_TYPE PendingWriteback
6446 : #define ST_COMPARE(a, b) buffertag_comparator(&a->tag, &b->tag)
6447 : #define ST_SCOPE static
6448 : #define ST_DEFINE
6449 : #include "lib/sort_template.h"
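/*
 * The ST_* defines above instantiate lib/sort_template.h, producing a
 * type-specialized sort routine named sort_pending_writebacks() over
 * PendingWriteback elements, ordered by buffertag_comparator().  That is
 * the routine IssuePendingWritebacks() below uses to sort the pending array.
 */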
6450 :
6451 : /*
6452 : * Issue all pending writeback requests, previously scheduled with
6453 : * ScheduleBufferTagForWriteback, to the OS.
6454 : *
6455 : * Because this is only used to improve the OS's IO scheduling, we try never
6456 : * to error out - it's just a hint.
6457 : */
6458 : void
6459 2082 : IssuePendingWritebacks(WritebackContext *wb_context, IOContext io_context)
6460 : {
6461 : instr_time io_start;
6462 : int i;
6463 :
6464 2082 : if (wb_context->nr_pending == 0)
6465 2082 : return;
6466 :
6467 : /*
6468 : * Executing the writes in order can make them a lot faster, and allows
6469 : * merging writeback requests for consecutive blocks into larger writebacks.
6470 : */
6471 0 : sort_pending_writebacks(wb_context->pending_writebacks,
6472 0 : wb_context->nr_pending);
6473 :
6474 0 : io_start = pgstat_prepare_io_time(track_io_timing);
6475 :
6476 : /*
6477 : * Coalesce neighbouring writes, but nothing else. For that we iterate
6478 : * through the now-sorted array of pending flushes, and look forward to
6479 : * find all neighbouring (or identical) writes.
6480 : */
6481 0 : for (i = 0; i < wb_context->nr_pending; i++)
6482 : {
6483 : PendingWriteback *cur;
6484 : PendingWriteback *next;
6485 : SMgrRelation reln;
6486 : int ahead;
6487 : BufferTag tag;
6488 : RelFileLocator currlocator;
6489 0 : Size nblocks = 1;
6490 :
6491 0 : cur = &wb_context->pending_writebacks[i];
6492 0 : tag = cur->tag;
6493 0 : currlocator = BufTagGetRelFileLocator(&tag);
6494 :
6495 : /*
6496 : * Peek ahead, into following writeback requests, to see if they can
6497 : * be combined with the current one.
6498 : */
6499 0 : for (ahead = 0; i + ahead + 1 < wb_context->nr_pending; ahead++)
6500 : {
6501 :
6502 0 : next = &wb_context->pending_writebacks[i + ahead + 1];
6503 :
6504 : /* different file, stop */
6505 0 : if (!RelFileLocatorEquals(currlocator,
6506 0 : BufTagGetRelFileLocator(&next->tag)) ||
6507 0 : BufTagGetForkNum(&cur->tag) != BufTagGetForkNum(&next->tag))
6508 : break;
6509 :
6510 : /* ok, block queued twice, skip */
6511 0 : if (cur->tag.blockNum == next->tag.blockNum)
6512 0 : continue;
6513 :
6514 : /* only merge consecutive writes */
6515 0 : if (cur->tag.blockNum + 1 != next->tag.blockNum)
6516 0 : break;
6517 :
6518 0 : nblocks++;
6519 0 : cur = next;
6520 : }
6521 :
6522 0 : i += ahead;
6523 :
6524 : /* and finally tell the kernel to write the data to storage */
6525 0 : reln = smgropen(currlocator, INVALID_PROC_NUMBER);
6526 0 : smgrwriteback(reln, BufTagGetForkNum(&tag), tag.blockNum, nblocks);
6527 : }
6528 :
6529 : /*
6530 : * Assume that writeback requests are only issued for buffers containing
6531 : * blocks of permanent relations.
6532 : */
6533 0 : pgstat_count_io_op_time(IOOBJECT_RELATION, io_context,
6534 0 : IOOP_WRITEBACK, io_start, wb_context->nr_pending, 0);
6535 :
6536 0 : wb_context->nr_pending = 0;
6537 : }
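/*
 * Illustrative sketch (not part of bufmgr.c): how a write loop ties the
 * writeback-control pieces together, assuming the checkpoint_flush_after
 * GUC as the coalescing limit.  The tag array stands in for the BufferTags
 * of buffers that were just written out; the helper name is hypothetical.
 */
static void
writeback_flow_example(BufferTag *tags, int ntags)
{
	WritebackContext wb_context;

	WritebackContextInit(&wb_context, &checkpoint_flush_after);

	for (int i = 0; i < ntags; i++)
		ScheduleBufferTagForWriteback(&wb_context, IOCONTEXT_NORMAL,
									  &tags[i]);

	/* push out whatever is still pending */
	IssuePendingWritebacks(&wb_context, IOCONTEXT_NORMAL);
}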
6538 :
6539 : /* ResourceOwner callbacks */
6540 :
6541 : static void
6542 30 : ResOwnerReleaseBufferIO(Datum res)
6543 : {
6544 30 : Buffer buffer = DatumGetInt32(res);
6545 :
6546 30 : AbortBufferIO(buffer);
6547 30 : }
6548 :
6549 : static char *
6550 0 : ResOwnerPrintBufferIO(Datum res)
6551 : {
6552 0 : Buffer buffer = DatumGetInt32(res);
6553 :
6554 0 : return psprintf("lost track of buffer IO on buffer %d", buffer);
6555 : }
6556 :
6557 : static void
6558 14756 : ResOwnerReleaseBufferPin(Datum res)
6559 : {
6560 14756 : Buffer buffer = DatumGetInt32(res);
6561 :
6562 : /* Like ReleaseBuffer, but don't call ResourceOwnerForgetBuffer */
6563 14756 : if (!BufferIsValid(buffer))
6564 0 : elog(ERROR, "bad buffer ID: %d", buffer);
6565 :
6566 14756 : if (BufferIsLocal(buffer))
6567 5970 : UnpinLocalBufferNoOwner(buffer);
6568 : else
6569 8786 : UnpinBufferNoOwner(GetBufferDescriptor(buffer - 1));
6570 14756 : }
6571 :
6572 : static char *
6573 0 : ResOwnerPrintBufferPin(Datum res)
6574 : {
6575 0 : return DebugPrintBufferRefcount(DatumGetInt32(res));
6576 : }
6577 :
6578 : /*
6579 : * Helper function to evict an unpinned buffer whose buffer header lock is
6580 : * already acquired.
6581 : */
6582 : static bool
6583 4266 : EvictUnpinnedBufferInternal(BufferDesc *desc, bool *buffer_flushed)
6584 : {
6585 : uint32 buf_state;
6586 : bool result;
6587 :
6588 4266 : *buffer_flushed = false;
6589 :
6590 4266 : buf_state = pg_atomic_read_u32(&(desc->state));
6591 : Assert(buf_state & BM_LOCKED);
6592 :
6593 4266 : if ((buf_state & BM_VALID) == 0)
6594 : {
6595 0 : UnlockBufHdr(desc, buf_state);
6596 0 : return false;
6597 : }
6598 :
6599 : /* Check that it's not pinned already. */
6600 4266 : if (BUF_STATE_GET_REFCOUNT(buf_state) > 0)
6601 : {
6602 0 : UnlockBufHdr(desc, buf_state);
6603 0 : return false;
6604 : }
6605 :
6606 4266 : PinBuffer_Locked(desc); /* releases spinlock */
6607 :
6608 : /* If it was dirty, try to clean it once. */
6609 4266 : if (buf_state & BM_DIRTY)
6610 : {
6611 1934 : LWLockAcquire(BufferDescriptorGetContentLock(desc), LW_SHARED);
6612 1934 : FlushBuffer(desc, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
6613 1934 : *buffer_flushed = true;
6614 1934 : LWLockRelease(BufferDescriptorGetContentLock(desc));
6615 : }
6616 :
6617 : /* This will return false if it becomes dirty or someone else pins it. */
6618 4266 : result = InvalidateVictimBuffer(desc);
6619 :
6620 4266 : UnpinBuffer(desc);
6621 :
6622 4266 : return result;
6623 : }
6624 :
6625 : /*
6626 : * Try to evict the current block in a shared buffer.
6627 : *
6628 : * This function is intended for testing/development use only!
6629 : *
6630 : * To succeed, the buffer must not be pinned on entry, so if the caller had a
6631 : * particular block in mind, it might already have been replaced by some other
6632 : * block by the time this function runs. It's also unpinned on return, so the
6633 : * buffer might be occupied again by the time control is returned, potentially
6634 : * even by the same block. This inherent raciness without other interlocking
6635 : * makes the function unsuitable for non-testing usage.
6636 : *
6637 : * *buffer_flushed is set to true if the buffer was dirty and has been
6638 : * flushed, false otherwise. However, *buffer_flushed=true does not
6639 : * necessarily mean that we flushed the buffer; it could have been flushed by
6640 : * someone else.
6641 : *
6642 : * Returns true if the buffer was valid and it has now been made invalid.
6643 : * Returns false if it wasn't valid, if it couldn't be evicted due to a pin,
6644 : * or if the buffer becomes dirty again while we're trying to write it out.
6645 : */
6646 : bool
6647 280 : EvictUnpinnedBuffer(Buffer buf, bool *buffer_flushed)
6648 : {
6649 : BufferDesc *desc;
6650 :
6651 : Assert(BufferIsValid(buf) && !BufferIsLocal(buf));
6652 :
6653 : /* Make sure we can pin the buffer. */
6654 280 : ResourceOwnerEnlarge(CurrentResourceOwner);
6655 280 : ReservePrivateRefCountEntry();
6656 :
6657 280 : desc = GetBufferDescriptor(buf - 1);
6658 280 : LockBufHdr(desc);
6659 :
6660 280 : return EvictUnpinnedBufferInternal(desc, buffer_flushed);
6661 : }
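/*
 * Illustrative sketch (not part of bufmgr.c): a minimal caller, similar in
 * spirit to what contrib/pg_buffercache exposes at the SQL level.  The
 * helper name is hypothetical.
 */
static void
evict_buffer_example(Buffer buf)
{
	bool		flushed;

	if (EvictUnpinnedBuffer(buf, &flushed))
		elog(DEBUG1, "evicted buffer %d (flushed: %d)", buf, flushed);
}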
6662 :
6663 : /*
6664 : * Try to evict all the shared buffers.
6665 : *
6666 : * This function is intended for testing/development use only! See
6667 : * EvictUnpinnedBuffer().
6668 : *
6669 : * The buffers_* parameters are mandatory and indicate the total count of
6670 : * buffers that:
6671 : * - buffers_evicted - were evicted
6672 : * - buffers_flushed - were flushed
6673 : * - buffers_skipped - could not be evicted
6674 : */
6675 : void
6676 2 : EvictAllUnpinnedBuffers(int32 *buffers_evicted, int32 *buffers_flushed,
6677 : int32 *buffers_skipped)
6678 : {
6679 2 : *buffers_evicted = 0;
6680 2 : *buffers_skipped = 0;
6681 2 : *buffers_flushed = 0;
6682 :
6683 32770 : for (int buf = 1; buf <= NBuffers; buf++)
6684 : {
6685 32768 : BufferDesc *desc = GetBufferDescriptor(buf - 1);
6686 : uint32 buf_state;
6687 : bool buffer_flushed;
6688 :
6689 32768 : buf_state = pg_atomic_read_u32(&desc->state);
6690 32768 : if (!(buf_state & BM_VALID))
6691 28782 : continue;
6692 :
6693 3986 : ResourceOwnerEnlarge(CurrentResourceOwner);
6694 3986 : ReservePrivateRefCountEntry();
6695 :
6696 3986 : LockBufHdr(desc);
6697 :
6698 3986 : if (EvictUnpinnedBufferInternal(desc, &buffer_flushed))
6699 3986 : (*buffers_evicted)++;
6700 : else
6701 0 : (*buffers_skipped)++;
6702 :
6703 3986 : if (buffer_flushed)
6704 1896 : (*buffers_flushed)++;
6705 : }
6706 2 : }
6707 :
6708 : /*
6709 : * Try to evict all the shared buffers containing the provided relation's pages.
6710 : *
6711 : * This function is intended for testing/development use only! See
6712 : * EvictUnpinnedBuffer().
6713 : *
6714 : * The caller must hold at least AccessShareLock on the relation to prevent
6715 : * the relation from being dropped.
6716 : *
6717 : * The buffers_* parameters are mandatory and indicate the total count of
6718 : * buffers that:
6719 : * - buffers_evicted - were evicted
6720 : * - buffers_flushed - were flushed
6721 : * - buffers_skipped - could not be evicted
6722 : */
6723 : void
6724 2 : EvictRelUnpinnedBuffers(Relation rel, int32 *buffers_evicted,
6725 : int32 *buffers_flushed, int32 *buffers_skipped)
6726 : {
6727 : Assert(!RelationUsesLocalBuffers(rel));
6728 :
6729 2 : *buffers_skipped = 0;
6730 2 : *buffers_evicted = 0;
6731 2 : *buffers_flushed = 0;
6732 :
6733 32770 : for (int buf = 1; buf <= NBuffers; buf++)
6734 : {
6735 32768 : BufferDesc *desc = GetBufferDescriptor(buf - 1);
6736 32768 : uint32 buf_state = pg_atomic_read_u32(&(desc->state));
6737 : bool buffer_flushed;
6738 :
6739 : /* An unlocked precheck should be safe and saves some cycles. */
6740 32768 : if ((buf_state & BM_VALID) == 0 ||
6741 56 : !BufTagMatchesRelFileLocator(&desc->tag, &rel->rd_locator))
6742 32768 : continue;
6743 :
6744 : /* Make sure we can pin the buffer. */
6745 0 : ResourceOwnerEnlarge(CurrentResourceOwner);
6746 0 : ReservePrivateRefCountEntry();
6747 :
6748 0 : buf_state = LockBufHdr(desc);
6749 :
6750 : /* recheck, could have changed without the lock */
6751 0 : if ((buf_state & BM_VALID) == 0 ||
6752 0 : !BufTagMatchesRelFileLocator(&desc->tag, &rel->rd_locator))
6753 : {
6754 0 : UnlockBufHdr(desc, buf_state);
6755 0 : continue;
6756 : }
6757 :
6758 0 : if (EvictUnpinnedBufferInternal(desc, &buffer_flushed))
6759 0 : (*buffers_evicted)++;
6760 : else
6761 0 : (*buffers_skipped)++;
6762 :
6763 0 : if (buffer_flushed)
6764 0 : (*buffers_flushed)++;
6765 : }
6766 2 : }
6767 :
6768 : /*
6769 : * Generic implementation of the AIO handle staging callback for readv/writev
6770 : * on local/shared buffers.
6771 : *
6772 : * Each readv/writev can target multiple buffers. The buffers have already
6773 : * been registered with the IO handle.
6774 : *
6775 : * To make the IO ready for execution ("staging"), we need to ensure that the
6776 : * targeted buffers are in an appropriate state while the IO is ongoing. For
6777 : * that, the AIO subsystem needs to have its own buffer pin; otherwise an error
6778 : * in this backend could lead to this backend's buffer pin being released as
6779 : * part of error handling, which in turn could lead to the buffer being
6780 : * replaced while IO is ongoing.
6781 : */
6782 : static pg_attribute_always_inline void
6783 2422904 : buffer_stage_common(PgAioHandle *ioh, bool is_write, bool is_temp)
6784 : {
6785 : uint64 *io_data;
6786 : uint8 handle_data_len;
6787 : PgAioWaitRef io_ref;
6788 2422904 : BufferTag first PG_USED_FOR_ASSERTS_ONLY = {0};
6789 :
6790 2422904 : io_data = pgaio_io_get_handle_data(ioh, &handle_data_len);
6791 :
6792 2422904 : pgaio_io_get_wref(ioh, &io_ref);
6793 :
6794 : /* iterate over all buffers affected by the vectored readv/writev */
6795 5167748 : for (int i = 0; i < handle_data_len; i++)
6796 : {
6797 2744844 : Buffer buffer = (Buffer) io_data[i];
6798 2744844 : BufferDesc *buf_hdr = is_temp ?
6799 16794 : GetLocalBufferDescriptor(-buffer - 1)
6800 2744844 : : GetBufferDescriptor(buffer - 1);
6801 : uint32 buf_state;
6802 :
6803 : /*
6804 : * Check that all the buffers are actually ones that could conceivably
6805 : * be done in one IO, i.e. are sequential. This is the last
6806 : * buffer-aware code before IO is actually executed and confusion
6807 : * about which buffers are targeted by IO can be hard to debug, making
6808 : * it worth doing extra-paranoid checks.
6809 : */
6810 2744844 : if (i == 0)
6811 2422904 : first = buf_hdr->tag;
6812 : else
6813 : {
6814 : Assert(buf_hdr->tag.relNumber == first.relNumber);
6815 : Assert(buf_hdr->tag.blockNum == first.blockNum + i);
6816 : }
6817 :
6818 2744844 : if (is_temp)
6819 16794 : buf_state = pg_atomic_read_u32(&buf_hdr->state);
6820 : else
6821 2728050 : buf_state = LockBufHdr(buf_hdr);
6822 :
6823 : /* verify the buffer is in the expected state */
6824 : Assert(buf_state & BM_TAG_VALID);
6825 : if (is_write)
6826 : {
6827 : Assert(buf_state & BM_VALID);
6828 : Assert(buf_state & BM_DIRTY);
6829 : }
6830 : else
6831 : {
6832 : Assert(!(buf_state & BM_VALID));
6833 : Assert(!(buf_state & BM_DIRTY));
6834 : }
6835 :
6836 : /* temp buffers don't use BM_IO_IN_PROGRESS */
6837 2744844 : if (!is_temp)
6838 : Assert(buf_state & BM_IO_IN_PROGRESS);
6839 :
6840 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) >= 1);
6841 :
6842 : /*
6843 : * Reflect that the buffer is now owned by the AIO subsystem.
6844 : *
6845 : * For local buffers: This can't be done just via LocalRefCount, as
6846 : * one might initially think, because this backend could error out while
6847 : * AIO is still in progress, releasing all of the pins held by the backend
6848 : * itself.
6849 : *
6850 : * This pin is released again in TerminateBufferIO().
6851 : */
6852 2744844 : buf_state += BUF_REFCOUNT_ONE;
6853 2744844 : buf_hdr->io_wref = io_ref;
6854 :
6855 2744844 : if (is_temp)
6856 16794 : pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
6857 : else
6858 2728050 : UnlockBufHdr(buf_hdr, buf_state);
6859 :
6860 : /*
6861 : * Ensure the content lock that prevents buffer modifications while
6862 : * the buffer is being written out is not released early due to an
6863 : * error.
6864 : */
6865 2744844 : if (is_write && !is_temp)
6866 : {
6867 : LWLock *content_lock;
6868 :
6869 0 : content_lock = BufferDescriptorGetContentLock(buf_hdr);
6870 :
6871 : Assert(LWLockHeldByMe(content_lock));
6872 :
6873 : /*
6874 : * Lock is now owned by AIO subsystem.
6875 : */
6876 0 : LWLockDisown(content_lock);
6877 : }
6878 :
6879 : /*
6880 : * Stop tracking this buffer via the resowner - the AIO system now
6881 : * keeps track.
6882 : */
6883 2744844 : if (!is_temp)
6884 2728050 : ResourceOwnerForgetBufferIO(CurrentResourceOwner, buffer);
6885 : }
6886 2422904 : }
6887 :
6888 : /*
6889 : * Decode readv errors as encoded by buffer_readv_encode_error().
6890 : */
6891 : static inline void
6892 698 : buffer_readv_decode_error(PgAioResult result,
6893 : bool *zeroed_any,
6894 : bool *ignored_any,
6895 : uint8 *zeroed_or_error_count,
6896 : uint8 *checkfail_count,
6897 : uint8 *first_off)
6898 : {
6899 698 : uint32 rem_error = result.error_data;
6900 :
6901 : /* see static asserts in buffer_readv_encode_error */
6902 : #define READV_COUNT_BITS 7
6903 : #define READV_COUNT_MASK ((1 << READV_COUNT_BITS) - 1)
6904 :
6905 698 : *zeroed_any = rem_error & 1;
6906 698 : rem_error >>= 1;
6907 :
6908 698 : *ignored_any = rem_error & 1;
6909 698 : rem_error >>= 1;
6910 :
6911 698 : *zeroed_or_error_count = rem_error & READV_COUNT_MASK;
6912 698 : rem_error >>= READV_COUNT_BITS;
6913 :
6914 698 : *checkfail_count = rem_error & READV_COUNT_MASK;
6915 698 : rem_error >>= READV_COUNT_BITS;
6916 :
6917 698 : *first_off = rem_error & READV_COUNT_MASK;
6918 698 : rem_error >>= READV_COUNT_BITS;
6919 698 : }
6920 :
6921 : /*
6922 : * Helper to encode errors for buffer_readv_complete()
6923 : *
6924 : * Errors are encoded as follows:
6925 : * - bit 0 indicates whether any page was zeroed (1) or not (0)
6926 : * - bit 1 indicates whether any checksum failure was ignored (1) or not (0)
6927 : * - next READV_COUNT_BITS bits indicate the number of errored or zeroed pages
6928 : * - next READV_COUNT_BITS bits indicate the number of checksum failures
6929 : * - next READV_COUNT_BITS bits indicate the first offset of the first page
6930 : * that was errored or zeroed or, if no errors/zeroes, the first ignored
6931 : * checksum
6932 : */
6933 : static inline void
6934 384 : buffer_readv_encode_error(PgAioResult *result,
6935 : bool is_temp,
6936 : bool zeroed_any,
6937 : bool ignored_any,
6938 : uint8 error_count,
6939 : uint8 zeroed_count,
6940 : uint8 checkfail_count,
6941 : uint8 first_error_off,
6942 : uint8 first_zeroed_off,
6943 : uint8 first_ignored_off)
6944 : {
6945 :
6946 384 : uint8 shift = 0;
6947 384 : uint8 zeroed_or_error_count =
6948 : error_count > 0 ? error_count : zeroed_count;
6949 : uint8 first_off;
6950 :
6951 : StaticAssertStmt(PG_IOV_MAX <= 1 << READV_COUNT_BITS,
6952 : "PG_IOV_MAX is bigger than reserved space for error data");
6953 : StaticAssertStmt((1 + 1 + 3 * READV_COUNT_BITS) <= PGAIO_RESULT_ERROR_BITS,
6954 : "PGAIO_RESULT_ERROR_BITS is insufficient for buffer_readv");
6955 :
6956 : /*
6957 : * We only have space to encode one offset - but luckily that's good
6958 : * enough. If there is an error, the error is the interesting offset; the
6959 : * same goes for a zeroed buffer versus an ignored one.
6960 : */
6961 384 : if (error_count > 0)
6962 188 : first_off = first_error_off;
6963 196 : else if (zeroed_count > 0)
6964 160 : first_off = first_zeroed_off;
6965 : else
6966 36 : first_off = first_ignored_off;
6967 :
6968 : Assert(!zeroed_any || error_count == 0);
6969 :
6970 384 : result->error_data = 0;
6971 :
6972 384 : result->error_data |= zeroed_any << shift;
6973 384 : shift += 1;
6974 :
6975 384 : result->error_data |= ignored_any << shift;
6976 384 : shift += 1;
6977 :
6978 384 : result->error_data |= ((uint32) zeroed_or_error_count) << shift;
6979 384 : shift += READV_COUNT_BITS;
6980 :
6981 384 : result->error_data |= ((uint32) checkfail_count) << shift;
6982 384 : shift += READV_COUNT_BITS;
6983 :
6984 384 : result->error_data |= ((uint32) first_off) << shift;
6985 384 : shift += READV_COUNT_BITS;
6986 :
6987 384 : result->id = is_temp ? PGAIO_HCB_LOCAL_BUFFER_READV :
6988 : PGAIO_HCB_SHARED_BUFFER_READV;
6989 :
6990 384 : if (error_count > 0)
6991 188 : result->status = PGAIO_RS_ERROR;
6992 : else
6993 196 : result->status = PGAIO_RS_WARNING;
6994 :
6995 : /*
6996 : * The encoding is complicated enough to warrant cross-checking it against
6997 : * the decode function.
6998 : */
6999 : #ifdef USE_ASSERT_CHECKING
7000 : {
7001 : bool zeroed_any_2,
7002 : ignored_any_2;
7003 : uint8 zeroed_or_error_count_2,
7004 : checkfail_count_2,
7005 : first_off_2;
7006 :
7007 : buffer_readv_decode_error(*result,
7008 : &zeroed_any_2, &ignored_any_2,
7009 : &zeroed_or_error_count_2,
7010 : &checkfail_count_2,
7011 : &first_off_2);
7012 : Assert(zeroed_any == zeroed_any_2);
7013 : Assert(ignored_any == ignored_any_2);
7014 : Assert(zeroed_or_error_count == zeroed_or_error_count_2);
7015 : Assert(checkfail_count == checkfail_count_2);
7016 : Assert(first_off == first_off_2);
7017 : }
7018 : #endif
7019 :
7020 : #undef READV_COUNT_BITS
7021 : #undef READV_COUNT_MASK
7022 384 : }
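/*
 * Worked example of the encoding above: with zeroed_any = 1, ignored_any = 0,
 * two zeroed pages, one checksum failure, and first affected offset 3, the
 * packed value is
 *
 *     error_data = 1            (bit 0, zeroed_any)
 *                | 0 << 1       (bit 1, ignored_any)
 *                | 2 << 2       (zeroed_or_error_count)
 *                | 1 << 9       (checkfail_count)
 *                | 3 << 16      (first_off)
 *                = 0x30209
 */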
7023 :
7024 : /*
7025 : * Helper for AIO readv completion callbacks, supporting both shared and temp
7026 : * buffers. Gets called once for each buffer in a multi-page read.
7027 : */
7028 : static pg_attribute_always_inline void
7029 2470440 : buffer_readv_complete_one(PgAioTargetData *td, uint8 buf_off, Buffer buffer,
7030 : uint8 flags, bool failed, bool is_temp,
7031 : bool *buffer_invalid,
7032 : bool *failed_checksum,
7033 : bool *ignored_checksum,
7034 : bool *zeroed_buffer)
7035 : {
7036 2470440 : BufferDesc *buf_hdr = is_temp ?
7037 16794 : GetLocalBufferDescriptor(-buffer - 1)
7038 2470440 : : GetBufferDescriptor(buffer - 1);
7039 2470440 : BufferTag tag = buf_hdr->tag;
7040 2470440 : char *bufdata = BufferGetBlock(buffer);
7041 : uint32 set_flag_bits;
7042 : int piv_flags;
7043 :
7044 : /* check that the buffer is in the expected state for a read */
7045 : #ifdef USE_ASSERT_CHECKING
7046 : {
7047 : uint32 buf_state = pg_atomic_read_u32(&buf_hdr->state);
7048 :
7049 : Assert(buf_state & BM_TAG_VALID);
7050 : Assert(!(buf_state & BM_VALID));
7051 : /* temp buffers don't use BM_IO_IN_PROGRESS */
7052 : if (!is_temp)
7053 : Assert(buf_state & BM_IO_IN_PROGRESS);
7054 : Assert(!(buf_state & BM_DIRTY));
7055 : }
7056 : #endif
7057 :
7058 2470440 : *buffer_invalid = false;
7059 2470440 : *failed_checksum = false;
7060 2470440 : *ignored_checksum = false;
7061 2470440 : *zeroed_buffer = false;
7062 :
7063 : /*
7064 : * We ask PageIsVerified() to only log the message about checksum errors,
7065 : * as the completion might be run in any backend (or an IO worker). We will
7066 : * report checksum errors in buffer_readv_report().
7067 : */
7068 2470440 : piv_flags = PIV_LOG_LOG;
7069 :
7070 : /* the local zero_damaged_pages may differ from the definer's */
7071 2470440 : if (flags & READ_BUFFERS_IGNORE_CHECKSUM_FAILURES)
7072 76 : piv_flags |= PIV_IGNORE_CHECKSUM_FAILURE;
7073 :
7074 : /* Check for garbage data. */
7075 2470440 : if (!failed)
7076 : {
7077 : /*
7078 : * If the buffer is not currently pinned by this backend, e.g. because
7079 : * we're completing this IO after an error, the buffer data will have
7080 : * been marked as inaccessible when the buffer was unpinned. The AIO
7081 : * subsystem holds a pin, but that doesn't prevent the buffer from
7082 : * having been marked as inaccessible. The completion might also be
7083 : * executed in a different process.
7084 : */
7085 : #ifdef USE_VALGRIND
7086 : if (!BufferIsPinned(buffer))
7087 : VALGRIND_MAKE_MEM_DEFINED(bufdata, BLCKSZ);
7088 : #endif
7089 :
7090 2470382 : if (!PageIsVerified((Page) bufdata, tag.blockNum, piv_flags,
7091 : failed_checksum))
7092 : {
7093 192 : if (flags & READ_BUFFERS_ZERO_ON_ERROR)
7094 : {
7095 92 : memset(bufdata, 0, BLCKSZ);
7096 92 : *zeroed_buffer = true;
7097 : }
7098 : else
7099 : {
7100 100 : *buffer_invalid = true;
7101 : /* mark buffer as having failed */
7102 100 : failed = true;
7103 : }
7104 : }
7105 2470190 : else if (*failed_checksum)
7106 24 : *ignored_checksum = true;
7107 :
7108 : /* undo what we did above */
7109 : #ifdef USE_VALGRIND
7110 : if (!BufferIsPinned(buffer))
7111 : VALGRIND_MAKE_MEM_NOACCESS(bufdata, BLCKSZ);
7112 : #endif
7113 :
7114 : /*
7115 : * Immediately log a message about the invalid page, but only to the
7116 : * server log. The reason to do so immediately is that this may be
7117 : * executed in a different backend than the one that originated the
7118 : * request. Another reason to do so immediately is that the originator
7119 : * might not process the query result immediately (because it is busy
7120 : * doing another part of query processing) or at all (e.g. if it was
7121 : * cancelled or errored out due to another IO also failing). The
7122 : * definer of the IO will emit an ERROR or WARNING when processing the
7123 : * IO's results.
7124 : *
7125 : * To avoid duplicating the code to emit these log messages, we reuse
7126 : * buffer_readv_report().
7127 : */
7128 2470382 : if (*buffer_invalid || *failed_checksum || *zeroed_buffer)
7129 : {
7130 216 : PgAioResult result_one = {0};
7131 :
7132 216 : buffer_readv_encode_error(&result_one, is_temp,
7133 216 : *zeroed_buffer,
7134 216 : *ignored_checksum,
7135 216 : *buffer_invalid,
7136 216 : *zeroed_buffer ? 1 : 0,
7137 216 : *failed_checksum ? 1 : 0,
7138 : buf_off, buf_off, buf_off);
7139 216 : pgaio_result_report(result_one, td, LOG_SERVER_ONLY);
7140 : }
7141 : }
7142 :
7143 : /* Terminate I/O and set BM_VALID. */
7144 2470440 : set_flag_bits = failed ? BM_IO_ERROR : BM_VALID;
7145 2470440 : if (is_temp)
7146 16794 : TerminateLocalBufferIO(buf_hdr, false, set_flag_bits, true);
7147 : else
7148 2453646 : TerminateBufferIO(buf_hdr, false, set_flag_bits, false, true);
7149 :
7150 : /*
7151 : * Call the BUFFER_READ_DONE tracepoint in the callback, even though the
7152 : * callback may not be executed in the same backend that called
7153 : * BUFFER_READ_START. The alternative would be to defer calling the
7154 : * tracepoint to a later point (e.g. the local completion callback for
7155 : * shared buffer reads), which seems even less helpful.
7156 : */
7157 : TRACE_POSTGRESQL_BUFFER_READ_DONE(tag.forkNum,
7158 : tag.blockNum,
7159 : tag.spcOid,
7160 : tag.dbOid,
7161 : tag.relNumber,
7162 : is_temp ? MyProcNumber : INVALID_PROC_NUMBER,
7163 : false);
7164 2470440 : }
7165 :
7166 : /*
7167 : * Perform completion handling of a single AIO read. This read may cover
7168 : * multiple blocks / buffers.
7169 : *
7170 : * Shared between shared and local buffers, to reduce code duplication.
7171 : */
7172 : static pg_attribute_always_inline PgAioResult
7173 2214354 : buffer_readv_complete(PgAioHandle *ioh, PgAioResult prior_result,
7174 : uint8 cb_data, bool is_temp)
7175 : {
7176 2214354 : PgAioResult result = prior_result;
7177 2214354 : PgAioTargetData *td = pgaio_io_get_target_data(ioh);
7178 2214354 : uint8 first_error_off = 0;
7179 2214354 : uint8 first_zeroed_off = 0;
7180 2214354 : uint8 first_ignored_off = 0;
7181 2214354 : uint8 error_count = 0;
7182 2214354 : uint8 zeroed_count = 0;
7183 2214354 : uint8 ignored_count = 0;
7184 2214354 : uint8 checkfail_count = 0;
7185 : uint64 *io_data;
7186 : uint8 handle_data_len;
7187 :
7188 : if (is_temp)
7189 : {
7190 : Assert(td->smgr.is_temp);
7191 : Assert(pgaio_io_get_owner(ioh) == MyProcNumber);
7192 : }
7193 : else
7194 : Assert(!td->smgr.is_temp);
7195 :
7196 : /*
7197 : * Iterate over all the buffers affected by this IO and call the
7198 : * per-buffer completion function for each buffer.
7199 : */
7200 2214354 : io_data = pgaio_io_get_handle_data(ioh, &handle_data_len);
7201 4684794 : for (uint8 buf_off = 0; buf_off < handle_data_len; buf_off++)
7202 : {
7203 2470440 : Buffer buf = io_data[buf_off];
7204 : bool failed;
7205 2470440 : bool failed_verification = false;
7206 2470440 : bool failed_checksum = false;
7207 2470440 : bool zeroed_buffer = false;
7208 2470440 : bool ignored_checksum = false;
7209 :
7210 : Assert(BufferIsValid(buf));
7211 :
7212 : /*
7213 : * If the entire I/O failed at a lower level, each buffer needs to be
7214 : * marked as failed. In case of a partial read, the first few buffers
7215 : * may be ok.
7216 : */
7217 2470440 : failed =
7218 2470440 : prior_result.status == PGAIO_RS_ERROR
7219 2470440 : || prior_result.result <= buf_off;
7220 :
7221 2470440 : buffer_readv_complete_one(td, buf_off, buf, cb_data, failed, is_temp,
7222 : &failed_verification,
7223 : &failed_checksum,
7224 : &ignored_checksum,
7225 : &zeroed_buffer);
7226 :
7227 : /*
7228 : * Track information about the number of different kinds of error
7229 : * conditions across all pages, as there can be multiple pages failing
7230 : * verification as part of one IO.
7231 : */
7232 2470440 : if (failed_verification && !zeroed_buffer && error_count++ == 0)
7233 88 : first_error_off = buf_off;
7234 2470440 : if (zeroed_buffer && zeroed_count++ == 0)
7235 68 : first_zeroed_off = buf_off;
7236 2470440 : if (ignored_checksum && ignored_count++ == 0)
7237 20 : first_ignored_off = buf_off;
7238 2470440 : if (failed_checksum)
7239 64 : checkfail_count++;
7240 : }
7241 :
7242 : /*
7243 : * If the smgr read succeeded [partially] and page verification failed for
7244 : * some of the pages, adjust the IO's result state appropriately.
7245 : */
7246 2214354 : if (prior_result.status != PGAIO_RS_ERROR &&
7247 2214248 : (error_count > 0 || ignored_count > 0 || zeroed_count > 0))
7248 : {
7249 168 : buffer_readv_encode_error(&result, is_temp,
7250 : zeroed_count > 0, ignored_count > 0,
7251 : error_count, zeroed_count, checkfail_count,
7252 : first_error_off, first_zeroed_off,
7253 : first_ignored_off);
7254 168 : pgaio_result_report(result, td, DEBUG1);
7255 : }
7256 :
7257 : /*
7258 : * For shared relations this reporting is done in
7259 : * shared_buffer_readv_complete_local().
7260 : */
7261 2214354 : if (is_temp && checkfail_count > 0)
7262 4 : pgstat_report_checksum_failures_in_db(td->smgr.rlocator.dbOid,
7263 : checkfail_count);
7264 :
7265 2214354 : return result;
7266 : }
7267 :
7268 : /*
7269 : * AIO error reporting callback for aio_shared_buffer_readv_cb and
7270 : * aio_local_buffer_readv_cb.
7271 : *
7272 : * The error is encoded / decoded in buffer_readv_encode_error() /
7273 : * buffer_readv_decode_error().
7274 : */
7275 : static void
7276 544 : buffer_readv_report(PgAioResult result, const PgAioTargetData *td,
7277 : int elevel)
7278 : {
7279 544 : int nblocks = td->smgr.nblocks;
7280 544 : BlockNumber first = td->smgr.blockNum;
7281 544 : BlockNumber last = first + nblocks - 1;
7282 544 : ProcNumber errProc =
7283 544 : td->smgr.is_temp ? MyProcNumber : INVALID_PROC_NUMBER;
7284 : RelPathStr rpath =
7285 544 : relpathbackend(td->smgr.rlocator, errProc, td->smgr.forkNum);
7286 : bool zeroed_any,
7287 : ignored_any;
7288 : uint8 zeroed_or_error_count,
7289 : checkfail_count,
7290 : first_off;
7291 : uint8 affected_count;
7292 : const char *msg_one,
7293 : *msg_mult,
7294 : *det_mult,
7295 : *hint_mult;
7296 :
7297 544 : buffer_readv_decode_error(result, &zeroed_any, &ignored_any,
7298 : &zeroed_or_error_count,
7299 : &checkfail_count,
7300 : &first_off);
7301 :
7302 : /*
7303 : * Treat a read that had both zeroed buffers *and* ignored checksums as a
7304 : * special case; it's too irregular to be emitted the same way as the
7305 : * other cases.
7306 : */
7307 544 : if (zeroed_any && ignored_any)
7308 : {
7309 : Assert(zeroed_any && ignored_any);
7310 : Assert(nblocks > 1); /* same block can't be both zeroed and ignored */
7311 : Assert(result.status != PGAIO_RS_ERROR);
7312 8 : affected_count = zeroed_or_error_count;
7313 :
7314 8 : ereport(elevel,
7315 : errcode(ERRCODE_DATA_CORRUPTED),
7316 : errmsg("zeroing %u page(s) and ignoring %u checksum failure(s) among blocks %u..%u of relation %s",
7317 : affected_count, checkfail_count, first, last, rpath.str),
7318 : affected_count > 1 ?
7319 : errdetail("Block %u held first zeroed page.",
7320 : first + first_off) : 0,
7321 : errhint("See server log for details about the other %d invalid block(s).",
7322 : affected_count + checkfail_count - 1));
7323 8 : return;
7324 : }
7325 :
7326 : /*
7327 : * The other messages are highly repetitive. To avoid duplicating a long
7328 : * and complicated ereport(), gather the translated format strings
7329 : * separately and then do one common ereport.
7330 : */
7331 536 : if (result.status == PGAIO_RS_ERROR)
7332 : {
7333 : Assert(!zeroed_any); /* can't have invalid pages when zeroing them */
7334 272 : affected_count = zeroed_or_error_count;
7335 272 : msg_one = _("invalid page in block %u of relation %s");
7336 272 : msg_mult = _("%u invalid pages among blocks %u..%u of relation %s");
7337 272 : det_mult = _("Block %u held first invalid page.");
7338 272 : hint_mult = _("See server log for the other %u invalid block(s).");
7339 : }
7340 264 : else if (zeroed_any && !ignored_any)
7341 : {
7342 216 : affected_count = zeroed_or_error_count;
7343 216 : msg_one = _("invalid page in block %u of relation %s; zeroing out page");
7344 216 : msg_mult = _("zeroing out %u invalid pages among blocks %u..%u of relation %s");
7345 216 : det_mult = _("Block %u held first zeroed page.");
7346 216 : hint_mult = _("See server log for the other %u zeroed block(s).");
7347 : }
7348 48 : else if (!zeroed_any && ignored_any)
7349 : {
7350 48 : affected_count = checkfail_count;
7351 48 : msg_one = _("ignoring checksum failure in block %u of relation %s");
7352 48 : msg_mult = _("ignoring %u checksum failures among blocks %u..%u of relation %s");
7353 48 : det_mult = _("Block %u held first ignored page.");
7354 48 : hint_mult = _("See server log for the other %u ignored block(s).");
7355 : }
7356 : else
7357 0 : pg_unreachable();
7358 :
7359 536 : ereport(elevel,
7360 : errcode(ERRCODE_DATA_CORRUPTED),
7361 : affected_count == 1 ?
7362 : errmsg_internal(msg_one, first + first_off, rpath.str) :
7363 : errmsg_internal(msg_mult, affected_count, first, last, rpath.str),
7364 : affected_count > 1 ? errdetail_internal(det_mult, first + first_off) : 0,
7365 : affected_count > 1 ? errhint_internal(hint_mult, affected_count - 1) : 0);
7366 : }
7367 :
7368 : static void
7369 2419316 : shared_buffer_readv_stage(PgAioHandle *ioh, uint8 cb_data)
7370 : {
7371 2419316 : buffer_stage_common(ioh, false, false);
7372 2419316 : }
7373 :
7374 : static PgAioResult
7375 2210766 : shared_buffer_readv_complete(PgAioHandle *ioh, PgAioResult prior_result,
7376 : uint8 cb_data)
7377 : {
7378 2210766 : return buffer_readv_complete(ioh, prior_result, cb_data, false);
7379 : }
7380 :
7381 : /*
7382 : * We need a backend-local completion callback for shared buffers, to be able
7383 : * to report checksum errors correctly. Unfortunately that can only safely
7384 : * happen if the reporting backend has previously called
7385 : * pgstat_prepare_report_checksum_failure(), which we can only guarantee in
7386 : * the backend that started the IO. Hence this callback.
7387 : */
7388 : static PgAioResult
7389 2419316 : shared_buffer_readv_complete_local(PgAioHandle *ioh, PgAioResult prior_result,
7390 : uint8 cb_data)
7391 : {
7392 : bool zeroed_any,
7393 : ignored_any;
7394 : uint8 zeroed_or_error_count,
7395 : checkfail_count,
7396 : first_off;
7397 :
7398 2419316 : if (prior_result.status == PGAIO_RS_OK)
7399 2419162 : return prior_result;
7400 :
7401 154 : buffer_readv_decode_error(prior_result,
7402 : &zeroed_any,
7403 : &ignored_any,
7404 : &zeroed_or_error_count,
7405 : &checkfail_count,
7406 : &first_off);
7407 :
7408 154 : if (checkfail_count)
7409 : {
7410 48 : PgAioTargetData *td = pgaio_io_get_target_data(ioh);
7411 :
7412 48 : pgstat_report_checksum_failures_in_db(td->smgr.rlocator.dbOid,
7413 : checkfail_count);
7414 : }
7415 :
7416 154 : return prior_result;
7417 : }
7418 :
7419 : static void
7420 3588 : local_buffer_readv_stage(PgAioHandle *ioh, uint8 cb_data)
7421 : {
7422 3588 : buffer_stage_common(ioh, false, true);
7423 3588 : }
7424 :
7425 : static PgAioResult
7426 3588 : local_buffer_readv_complete(PgAioHandle *ioh, PgAioResult prior_result,
7427 : uint8 cb_data)
7428 : {
7429 3588 : return buffer_readv_complete(ioh, prior_result, cb_data, true);
7430 : }
7431 :
7432 : /* readv callback is passed READ_BUFFERS_* flags as callback data */
7433 : const PgAioHandleCallbacks aio_shared_buffer_readv_cb = {
7434 : .stage = shared_buffer_readv_stage,
7435 : .complete_shared = shared_buffer_readv_complete,
7436 : /* need a local callback to report checksum failures */
7437 : .complete_local = shared_buffer_readv_complete_local,
7438 : .report = buffer_readv_report,
7439 : };
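/*
 * The issuing read path registers PGAIO_HCB_SHARED_BUFFER_READV on the AIO
 * handle and passes its READ_BUFFERS_* flags as the callback data; that is
 * how the "cb_data"/"flags" values tested in buffer_readv_complete_one()
 * arrive in these callbacks.
 */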
7440 :
7441 : /* readv callback is passed READ_BUFFERS_* flags as callback data */
7442 : const PgAioHandleCallbacks aio_local_buffer_readv_cb = {
7443 : .stage = local_buffer_readv_stage,
7444 :
7445 : /*
7446 : * Note that this, in contrast to the shared_buffers case, uses
7447 : * complete_local, as only the issuing backend has access to the required
7448 : * data structures. This is important in case the IO completion is
7449 : * consumed incidentally by another backend.
7450 : */
7451 : .complete_local = local_buffer_readv_complete,
7452 : .report = buffer_readv_report,
7453 : };
|