Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * bufmgr.c
4 : * buffer manager interface routines
5 : *
6 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/storage/buffer/bufmgr.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : /*
16 : * Principal entry points:
17 : *
18 : * ReadBuffer() -- find or create a buffer holding the requested page,
19 : * and pin it so that no one can destroy it while this process
20 : * is using it.
21 : *
22 : * StartReadBuffer() -- as above, with separate wait step
23 : * StartReadBuffers() -- multiple block version
24 : * WaitReadBuffers() -- second step of above
25 : *
26 : * ReleaseBuffer() -- unpin a buffer
27 : *
28 : * MarkBufferDirty() -- mark a pinned buffer's contents as "dirty".
29 : * The disk write is delayed until buffer replacement or checkpoint.
30 : *
31 : * See also these files:
32 : * freelist.c -- chooses victim for buffer replacement
33 : * buf_table.c -- manages the buffer lookup table
34 : */
35 : #include "postgres.h"
36 :
37 : #include <sys/file.h>
38 : #include <unistd.h>
39 :
40 : #include "access/tableam.h"
41 : #include "access/xloginsert.h"
42 : #include "access/xlogutils.h"
43 : #ifdef USE_ASSERT_CHECKING
44 : #include "catalog/pg_tablespace_d.h"
45 : #endif
46 : #include "catalog/storage.h"
47 : #include "catalog/storage_xlog.h"
48 : #include "executor/instrument.h"
49 : #include "lib/binaryheap.h"
50 : #include "miscadmin.h"
51 : #include "pg_trace.h"
52 : #include "pgstat.h"
53 : #include "postmaster/bgwriter.h"
54 : #include "storage/aio.h"
55 : #include "storage/buf_internals.h"
56 : #include "storage/bufmgr.h"
57 : #include "storage/fd.h"
58 : #include "storage/ipc.h"
59 : #include "storage/lmgr.h"
60 : #include "storage/proc.h"
61 : #include "storage/proclist.h"
62 : #include "storage/procsignal.h"
63 : #include "storage/read_stream.h"
64 : #include "storage/smgr.h"
65 : #include "storage/standby.h"
66 : #include "utils/memdebug.h"
67 : #include "utils/ps_status.h"
68 : #include "utils/rel.h"
69 : #include "utils/resowner.h"
70 : #include "utils/timestamp.h"
71 : #include "utils/wait_event.h"
72 :
73 :
74 : /* Note: these two macros only work on shared buffers, not local ones! */
75 : #define BufHdrGetBlock(bufHdr) ((Block) (BufferBlocks + ((Size) (bufHdr)->buf_id) * BLCKSZ))
76 : #define BufferGetLSN(bufHdr) (PageGetLSN(BufHdrGetBlock(bufHdr)))
77 :
78 : /* Note: this macro only works on local buffers, not shared ones! */
79 : #define LocalBufHdrGetBlock(bufHdr) \
80 : LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
81 :
82 : /* Bits in SyncOneBuffer's return value */
83 : #define BUF_WRITTEN 0x01
84 : #define BUF_REUSABLE 0x02
85 :
86 : #define RELS_BSEARCH_THRESHOLD 20
87 :
88 : /*
89 : * This is the size (in the number of blocks) above which we scan the
90 : * entire buffer pool to remove the buffers for all the pages of relation
91 : * being dropped. For the relations with size below this threshold, we find
92 : * the buffers by doing lookups in BufMapping table.
93 : */
94 : #define BUF_DROP_FULL_SCAN_THRESHOLD (uint64) (NBuffers / 32)
95 :
96 : /*
97 : * This is separated out from PrivateRefCountEntry to allow for copying all
98 : * the data members via struct assignment.
99 : */
100 : typedef struct PrivateRefCountData
101 : {
102 : /*
103 : * How many times has the buffer been pinned by this backend.
104 : */
105 : int32 refcount;
106 :
107 : /*
108 : * Is the buffer locked by this backend? BUFFER_LOCK_UNLOCK indicates that
109 : * the buffer is not locked.
110 : */
111 : BufferLockMode lockmode;
112 : } PrivateRefCountData;
113 :
114 : typedef struct PrivateRefCountEntry
115 : {
116 : /*
117 : * Note that this needs to be the same as the entry's corresponding
118 : * PrivateRefCountArrayKeys[i], if the entry is stored in the array. We
119 : * store it in both places as this is used for the hashtable key and
120 : * because it is more convenient (passing around a PrivateRefCountEntry
121 : * suffices to identify the buffer) and faster (checking the keys array is
122 : * faster when checking many entries, checking the entry is faster if just
123 : * checking a single entry).
124 : */
125 : Buffer buffer;
126 :
127 : PrivateRefCountData data; /* this backend's pin count and lock state */
128 : } PrivateRefCountEntry;
129 :
130 : /* 64 bytes, about the size of a cache line on common systems */
131 : #define REFCOUNT_ARRAY_ENTRIES 8
132 :
133 : /*
134 : * Status of buffers to checkpoint for a particular tablespace, used
135 : * internally in BufferSync.
136 : */
137 : typedef struct CkptTsStatus
138 : {
139 : /* oid of the tablespace */
140 : Oid tsId;
141 :
142 : /*
143 : * Checkpoint progress for this tablespace. To make progress comparable
144 : * between tablespaces the progress is, for each tablespace, measured as a
145 : * number between 0 and the total number of to-be-checkpointed pages. Each
146 : * page checkpointed in this tablespace increments this space's progress
147 : * by progress_slice.
148 : */
149 : float8 progress;
150 : float8 progress_slice;
151 :
152 : /* number of to-be-checkpointed pages in this tablespace */
153 : int num_to_scan;
154 : /* already processed pages in this tablespace */
155 : int num_scanned;
156 :
157 : /* current offset in CkptBufferIds for this tablespace */
158 : int index;
159 : } CkptTsStatus;
160 :
161 : /*
162 : * Type for array used to sort SMgrRelations
163 : *
164 : * FlushRelationsAllBuffers shares the same comparator function with
165 : * DropRelationsAllBuffers. Pointer to this struct and RelFileLocator must be
166 : * compatible.
167 : */
168 : typedef struct SMgrSortArray
169 : {
170 : RelFileLocator rlocator; /* This must be the first member */
171 : SMgrRelation srel;
172 : } SMgrSortArray;
173 :
174 : /* GUC variables */
175 : bool zero_damaged_pages = false;
176 : int bgwriter_lru_maxpages = 100;
177 : double bgwriter_lru_multiplier = 2.0;
178 : bool track_io_timing = false;
179 :
180 : /*
181 : * How many buffers PrefetchBuffer callers should try to stay ahead of their
182 : * ReadBuffer calls by. Zero means "never prefetch". This value is only used
183 : * for buffers not belonging to tablespaces that have their
184 : * effective_io_concurrency parameter set.
185 : */
186 : int effective_io_concurrency = DEFAULT_EFFECTIVE_IO_CONCURRENCY;
187 :
188 : /*
189 : * Like effective_io_concurrency, but used by maintenance code paths that might
190 : * benefit from a higher setting because they work on behalf of many sessions.
191 : * Overridden by the tablespace setting of the same name.
192 : */
193 : int maintenance_io_concurrency = DEFAULT_MAINTENANCE_IO_CONCURRENCY;
194 :
195 : /*
196 : * Limit on how many blocks should be handled in a single I/O operation.
197 : * StartReadBuffers() callers should respect it, as should other operations
198 : * that call smgr APIs directly. It is computed as the minimum of underlying
199 : * GUCs io_combine_limit_guc and io_max_combine_limit.
200 : */
201 : int io_combine_limit = DEFAULT_IO_COMBINE_LIMIT;
202 : int io_combine_limit_guc = DEFAULT_IO_COMBINE_LIMIT;
203 : int io_max_combine_limit = DEFAULT_IO_COMBINE_LIMIT;
204 :
205 : /*
206 : * GUC variables about triggering kernel writeback for buffers written; OS
207 : * dependent defaults are set via the GUC mechanism.
208 : */
209 : int checkpoint_flush_after = DEFAULT_CHECKPOINT_FLUSH_AFTER;
210 : int bgwriter_flush_after = DEFAULT_BGWRITER_FLUSH_AFTER;
211 : int backend_flush_after = DEFAULT_BACKEND_FLUSH_AFTER;
212 :
213 : /* local state for LockBufferForCleanup */
214 : static BufferDesc *PinCountWaitBuf = NULL;
215 :
216 : /*
217 : * Backend-Private refcount management:
218 : *
219 : * Each buffer also has a private refcount that keeps track of the number of
220 : * times the buffer is pinned in the current process. This is so that the
221 : * shared refcount needs to be modified only once if a buffer is pinned more
222 : * than once by an individual backend. It's also used to check that no
223 : * buffers are still pinned at the end of transactions and when exiting. We
224 : * also use this mechanism to track whether this backend has a buffer locked,
225 : * and, if so, in what mode.
226 : *
227 : *
228 : * To avoid - as we used to - requiring an array with NBuffers entries to keep
229 : * track of local buffers, we use a small sequentially searched array
230 : * (PrivateRefCountArrayKeys, with the corresponding data stored in
231 : * PrivateRefCountArray) and an overflow hash table (PrivateRefCountHash) to
232 : * keep track of backend local pins.
233 : *
234 : * Until no more than REFCOUNT_ARRAY_ENTRIES buffers are pinned at once, all
235 : * refcounts are kept track of in the array; after that, new array entries
236 : * displace old ones into the hash table. That way a frequently used entry
237 : * can't get "stuck" in the hashtable while infrequent ones clog the array.
238 : *
239 : * Note that in most scenarios the number of pinned buffers will not exceed
240 : * REFCOUNT_ARRAY_ENTRIES.
241 : *
242 : *
243 : * To enter a buffer into the refcount tracking mechanism first reserve a free
244 : * entry using ReservePrivateRefCountEntry() and then later, if necessary,
245 : * fill it with NewPrivateRefCountEntry(). That split lets us avoid doing
246 : * memory allocations in NewPrivateRefCountEntry() which can be important
247 : * because in some scenarios it's called with a spinlock held...
248 : */
249 : static Buffer PrivateRefCountArrayKeys[REFCOUNT_ARRAY_ENTRIES];
250 : static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES];
251 : static HTAB *PrivateRefCountHash = NULL;
252 : static int32 PrivateRefCountOverflowed = 0;
253 : static uint32 PrivateRefCountClock = 0;
254 : static int ReservedRefCountSlot = -1; /* array slot reserved for the next new entry; -1 if none reserved */
255 : static int PrivateRefCountEntryLast = -1; /* one-entry lookup cache: index of last-used array entry, -1 if invalid */
256 :
257 : static uint32 MaxProportionalPins;
258 :
259 : static void ReservePrivateRefCountEntry(void);
260 : static PrivateRefCountEntry *NewPrivateRefCountEntry(Buffer buffer);
261 : static PrivateRefCountEntry *GetPrivateRefCountEntry(Buffer buffer, bool do_move);
262 : static inline int32 GetPrivateRefCount(Buffer buffer);
263 : static void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref);
264 :
265 : /* ResourceOwner callbacks to hold in-progress I/Os and buffer pins */
266 : static void ResOwnerReleaseBufferIO(Datum res);
267 : static char *ResOwnerPrintBufferIO(Datum res);
268 : static void ResOwnerReleaseBuffer(Datum res);
269 : static char *ResOwnerPrintBuffer(Datum res);
270 :
271 : const ResourceOwnerDesc buffer_io_resowner_desc =
272 : {
273 : .name = "buffer io",
274 : .release_phase = RESOURCE_RELEASE_BEFORE_LOCKS,
275 : .release_priority = RELEASE_PRIO_BUFFER_IOS,
276 : .ReleaseResource = ResOwnerReleaseBufferIO,
277 : .DebugPrint = ResOwnerPrintBufferIO
278 : };
279 :
280 : const ResourceOwnerDesc buffer_resowner_desc =
281 : {
282 : .name = "buffer",
283 : .release_phase = RESOURCE_RELEASE_BEFORE_LOCKS,
284 : .release_priority = RELEASE_PRIO_BUFFER_PINS,
285 : .ReleaseResource = ResOwnerReleaseBuffer,
286 : .DebugPrint = ResOwnerPrintBuffer
287 : };
288 :
289 : /*
290 : * Ensure that the PrivateRefCountArray has sufficient space to store one more
291 : * entry. This has to be called before using NewPrivateRefCountEntry() to fill
292 : * a new entry - but it's perfectly fine to not use a reserved entry.
293 : */
294 : static void
295 67700008 : ReservePrivateRefCountEntry(void)
296 : {
297 : /* Already reserved (or freed), nothing to do */
298 67700008 : if (ReservedRefCountSlot != -1)
299 63421964 : return;
300 :
301 : /*
302 : * First search for a free entry in the array, that'll be sufficient in the
303 : * majority of cases.
304 : */
305 : {
306 : int i;
307 :
308 38502396 : for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
309 : {
310 34224352 : if (PrivateRefCountArrayKeys[i] == InvalidBuffer)
311 : {
312 25068134 : ReservedRefCountSlot = i;
313 :
314 : /*
315 : * We could return immediately, but iterating till the end of
316 : * the array allows compiler-autovectorization.
317 : */
318 : }
319 : }
320 :
321 4278044 : if (ReservedRefCountSlot != -1)
322 4094624 : return;
323 : }
324 :
325 : /*
326 : * No luck. All array entries are full. Move one array entry into the hash
327 : * table.
328 : */
329 : {
330 : /*
331 : * Move entry from the current clock position in the array into the
332 : * hashtable. Use that slot.
333 : */
334 : int victim_slot;
335 : PrivateRefCountEntry *victim_entry;
336 : PrivateRefCountEntry *hashent;
337 : bool found;
338 :
339 : /* select victim slot */
340 183420 : victim_slot = PrivateRefCountClock++ % REFCOUNT_ARRAY_ENTRIES;
341 183420 : victim_entry = &PrivateRefCountArray[victim_slot];
342 183420 : ReservedRefCountSlot = victim_slot;
343 :
344 : /* Better be used, otherwise we shouldn't get here. */
345 : Assert(PrivateRefCountArrayKeys[victim_slot] != InvalidBuffer);
346 : Assert(PrivateRefCountArray[victim_slot].buffer != InvalidBuffer);
347 : Assert(PrivateRefCountArrayKeys[victim_slot] == PrivateRefCountArray[victim_slot].buffer);
348 :
349 : /* enter victim array entry into hashtable */
350 183420 : hashent = hash_search(PrivateRefCountHash,
351 183420 : &PrivateRefCountArrayKeys[victim_slot],
352 : HASH_ENTER,
353 : &found);
354 : Assert(!found);
355 : /* move data from the entry in the array to the hash entry */
356 183420 : hashent->data = victim_entry->data;
357 :
358 : /* clear the now free array slot */
359 183420 : PrivateRefCountArrayKeys[victim_slot] = InvalidBuffer;
360 183420 : victim_entry->buffer = InvalidBuffer;
361 :
362 : /* clear the whole data member, just for future proofing */
363 183420 : memset(&victim_entry->data, 0, sizeof(victim_entry->data));
364 183420 : victim_entry->data.refcount = 0;
365 183420 : victim_entry->data.lockmode = BUFFER_LOCK_UNLOCK;
366 :
367 183420 : PrivateRefCountOverflowed++;
368 : }
369 : }
370 :
371 : /*
372 : * Fill a previously reserved refcount entry with the given buffer; the
373 : * entry starts out with refcount 0 and no lock held. Consumes the
374 : * reservation made by ReservePrivateRefCountEntry().
375 : */
376 : static PrivateRefCountEntry *
377 61330216 : NewPrivateRefCountEntry(Buffer buffer)
378 : {
379 : PrivateRefCountEntry *res;
380 :
381 : /* only allowed to be called when a reservation has been made */
382 : Assert(ReservedRefCountSlot != -1);
383 :
384 : /* use up the reserved entry */
385 61330216 : res = &PrivateRefCountArray[ReservedRefCountSlot];
386 :
387 : /* and fill it */
388 61330216 : PrivateRefCountArrayKeys[ReservedRefCountSlot] = buffer;
389 61330216 : res->buffer = buffer;
390 61330216 : res->data.refcount = 0;
391 61330216 : res->data.lockmode = BUFFER_LOCK_UNLOCK;
392 :
393 : /* update cache for the next lookup */
394 61330216 : PrivateRefCountEntryLast = ReservedRefCountSlot;
395 :
396 61330216 : ReservedRefCountSlot = -1;
397 :
398 61330216 : return res;
399 : }
398 :
399 : /*
400 : * Slow-path for GetPrivateRefCountEntry(). This is big enough to not be worth
401 : * inlining. This particularly seems to be true if the compiler is capable of
402 : * auto-vectorizing the code, as that imposes additional stack-alignment
403 : * requirements etc.
404 : */
405 : static pg_noinline PrivateRefCountEntry *
406 76273922 : GetPrivateRefCountEntrySlow(Buffer buffer, bool do_move)
407 : {
408 : PrivateRefCountEntry *res;
409 76273922 : int match = -1;
410 : int i;
411 :
412 : /*
413 : * First search for references in the array, that'll be sufficient in the
414 : * majority of cases.
415 : */
416 686465298 : for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
417 : {
418 610191376 : if (PrivateRefCountArrayKeys[i] == buffer)
419 : {
420 17025695 : match = i;
421 : /* see ReservePrivateRefCountEntry() for why we don't return */
422 : }
423 : }
424 :
425 76273922 : if (likely(match != -1))
426 : {
427 : /* update cache for the next lookup */
428 17025695 : PrivateRefCountEntryLast = match;
429 :
430 17025695 : return &PrivateRefCountArray[match];
431 : }
432 :
433 : /*
434 : * By here we know that the buffer, if already pinned, isn't residing in
435 : * the array.
436 : *
437 : * Only look up the buffer in the hashtable if we've previously overflowed
438 : * into it.
439 : */
440 59248227 : if (PrivateRefCountOverflowed == 0)
441 58809283 : return NULL;
442 :
443 438944 : res = hash_search(PrivateRefCountHash, &buffer, HASH_FIND, NULL);
444 :
445 438944 : if (res == NULL)
446 210278 : return NULL;
447 228666 : else if (!do_move)
448 : {
449 : /* caller doesn't want us to move the hash entry into the array */
450 132453 : return res;
451 : }
452 : else
453 : {
454 : /* move buffer from hashtable into the free array slot */
455 : bool found;
456 : PrivateRefCountEntry *free;
457 :
458 : /* Ensure there's a free array slot */
459 96213 : ReservePrivateRefCountEntry();
460 :
461 : /* Use up the reserved slot */
462 : Assert(ReservedRefCountSlot != -1);
463 96213 : free = &PrivateRefCountArray[ReservedRefCountSlot];
464 : Assert(PrivateRefCountArrayKeys[ReservedRefCountSlot] == free->buffer);
465 : Assert(free->buffer == InvalidBuffer);
466 :
467 : /* and fill it */
468 96213 : free->buffer = buffer;
469 96213 : free->data = res->data;
470 96213 : PrivateRefCountArrayKeys[ReservedRefCountSlot] = buffer;
471 : /* update cache for the next lookup; match is necessarily -1 on this path, so point the cache at the array slot we just filled */
472 96213 : PrivateRefCountEntryLast = ReservedRefCountSlot;
473 :
474 96213 : ReservedRefCountSlot = -1;
475 :
476 :
477 : /* delete from hashtable */
478 96213 : hash_search(PrivateRefCountHash, &buffer, HASH_REMOVE, &found);
479 : Assert(found);
480 : Assert(PrivateRefCountOverflowed > 0);
481 96213 : PrivateRefCountOverflowed--;
482 :
483 96213 : return free;
484 : }
485 : }
486 :
487 : /*
488 : * Return the PrivateRefCount entry for the passed buffer.
489 : *
490 : * Returns NULL if a buffer doesn't have a refcount entry. Otherwise, if
491 : * do_move is true, and the entry resides in the hashtable the entry is
492 : * optimized for frequent access by moving it to the array.
493 : */
494 : static inline PrivateRefCountEntry *
495 328974208 : GetPrivateRefCountEntry(Buffer buffer, bool do_move)
496 : {
497 : Assert(BufferIsValid(buffer));
498 : Assert(!BufferIsLocal(buffer));
499 :
500 : /*
501 : * It's very common to look up the same buffer repeatedly. To make that
502 : * fast, we have a one-entry cache.
503 : *
504 : * In contrast to the loop in GetPrivateRefCountEntrySlow(), here it is
505 : * faster to check PrivateRefCountArray[].buffer, as in the case of a hit
506 : * fewer addresses are computed and fewer cachelines are accessed. Whereas
507 : * in GetPrivateRefCountEntrySlow()'s case, checking
508 : * PrivateRefCountArrayKeys saves a lot of memory accesses.
509 : */
510 328974208 : if (likely(PrivateRefCountEntryLast != -1) &&
511 328910496 : likely(PrivateRefCountArray[PrivateRefCountEntryLast].buffer == buffer))
512 : {
513 252700286 : return &PrivateRefCountArray[PrivateRefCountEntryLast];
514 : }
515 :
516 : /*
517 : * The code for the cached lookup is small enough to be worth inlining
518 : * into the caller. In the miss case however, that empirically doesn't
519 : * seem worth it.
520 : */
521 76273922 : return GetPrivateRefCountEntrySlow(buffer, do_move);
522 : }
523 :
524 : /*
525 : * Returns how many times the passed buffer is pinned by this backend, or
526 : * 0 if this backend holds no pin on it.
527 : *
528 : * Only works for shared memory buffers!
529 : */
530 : static inline int32
531 3145487 : GetPrivateRefCount(Buffer buffer)
532 : {
533 : PrivateRefCountEntry *ref;
534 :
535 : Assert(BufferIsValid(buffer));
536 : Assert(!BufferIsLocal(buffer));
537 :
538 : /*
539 : * Not moving the entry - that's ok for the current users, but we might
540 : * want to change this one day.
541 : */
542 3145487 : ref = GetPrivateRefCountEntry(buffer, false);
543 :
544 3145487 : if (ref == NULL)
545 3202 : return 0;
546 3142285 : return ref->data.refcount;
547 : }
547 :
548 : /*
549 : * Release resources used to track the reference count of a buffer which we no
550 : * longer have pinned and don't want to pin again immediately. The entry may
551 : * reside either in the fast-path array or in the overflow hashtable.
552 : */
553 : static void
554 61330216 : ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
555 : {
556 : Assert(ref->data.refcount == 0);
557 : Assert(ref->data.lockmode == BUFFER_LOCK_UNLOCK);
558 :
559 61330216 : if (ref >= &PrivateRefCountArray[0] && /* does the entry live in the array? */
560 : ref < &PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES])
561 : {
562 61243009 : ref->buffer = InvalidBuffer;
563 61243009 : PrivateRefCountArrayKeys[ref - PrivateRefCountArray] = InvalidBuffer;
564 :
565 :
566 : /*
567 : * Mark the just used entry as reserved - in many scenarios that
568 : * allows us to avoid ever having to search the array/hash for free
569 : * entries.
570 : */
571 61243009 : ReservedRefCountSlot = ref - PrivateRefCountArray;
572 : }
573 : else
574 : {
575 : bool found;
576 87207 : Buffer buffer = ref->buffer;
577 :
578 87207 : hash_search(PrivateRefCountHash, &buffer, HASH_REMOVE, &found);
579 : Assert(found);
580 : Assert(PrivateRefCountOverflowed > 0);
581 87207 : PrivateRefCountOverflowed--;
582 : }
583 61330216 : }
583 :
584 : /*
585 : * BufferIsPinned
586 : * True iff the buffer is pinned (also checks for valid buffer number).
587 : * Handles both local (negative) and shared buffer numbers.
588 : *
589 : * NOTE: what we check here is that *this* backend holds a pin on
590 : * the buffer. We do not care whether some other backend does.
591 : */
592 : #define BufferIsPinned(bufnum) \
593 : ( \
594 : !BufferIsValid(bufnum) ? \
595 : false \
596 : : \
597 : BufferIsLocal(bufnum) ? \
598 : (LocalRefCount[-(bufnum) - 1] > 0) \
599 : : \
600 : (GetPrivateRefCount(bufnum) > 0) \
601 : )
601 :
602 :
603 : static Buffer ReadBuffer_common(Relation rel,
604 : SMgrRelation smgr, char smgr_persistence,
605 : ForkNumber forkNum, BlockNumber blockNum,
606 : ReadBufferMode mode, BufferAccessStrategy strategy);
607 : static BlockNumber ExtendBufferedRelCommon(BufferManagerRelation bmr,
608 : ForkNumber fork,
609 : BufferAccessStrategy strategy,
610 : uint32 flags,
611 : uint32 extend_by,
612 : BlockNumber extend_upto,
613 : Buffer *buffers,
614 : uint32 *extended_by);
615 : static BlockNumber ExtendBufferedRelShared(BufferManagerRelation bmr,
616 : ForkNumber fork,
617 : BufferAccessStrategy strategy,
618 : uint32 flags,
619 : uint32 extend_by,
620 : BlockNumber extend_upto,
621 : Buffer *buffers,
622 : uint32 *extended_by);
623 : static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy,
624 : bool skip_if_not_valid);
625 : static void PinBuffer_Locked(BufferDesc *buf);
626 : static void UnpinBuffer(BufferDesc *buf);
627 : static void UnpinBufferNoOwner(BufferDesc *buf);
628 : static void BufferSync(int flags);
629 : static int SyncOneBuffer(int buf_id, bool skip_recently_used,
630 : WritebackContext *wb_context);
631 : static void WaitIO(BufferDesc *buf);
632 : static void AbortBufferIO(Buffer buffer);
633 : static void shared_buffer_write_error_callback(void *arg);
634 : static void local_buffer_write_error_callback(void *arg);
635 : static inline BufferDesc *BufferAlloc(SMgrRelation smgr,
636 : char relpersistence,
637 : ForkNumber forkNum,
638 : BlockNumber blockNum,
639 : BufferAccessStrategy strategy,
640 : bool *foundPtr, IOContext io_context);
641 : static bool AsyncReadBuffers(ReadBuffersOperation *operation, int *nblocks_progress);
642 : static void CheckReadBuffersOperation(ReadBuffersOperation *operation, bool is_complete);
643 : static Buffer GetVictimBuffer(BufferAccessStrategy strategy, IOContext io_context);
644 : static void FlushUnlockedBuffer(BufferDesc *buf, SMgrRelation reln,
645 : IOObject io_object, IOContext io_context);
646 : static void FlushBuffer(BufferDesc *buf, SMgrRelation reln,
647 : IOObject io_object, IOContext io_context);
648 : static void FindAndDropRelationBuffers(RelFileLocator rlocator,
649 : ForkNumber forkNum,
650 : BlockNumber nForkBlock,
651 : BlockNumber firstDelBlock);
652 : static void RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
653 : RelFileLocator dstlocator,
654 : ForkNumber forkNum, bool permanent);
655 : static void AtProcExit_Buffers(int code, Datum arg);
656 : static void CheckForBufferLeaks(void);
657 : #ifdef USE_ASSERT_CHECKING
658 : static void AssertNotCatalogBufferLock(Buffer buffer, BufferLockMode mode);
659 : #endif
660 : static int rlocator_comparator(const void *p1, const void *p2);
661 : static inline int buffertag_comparator(const BufferTag *ba, const BufferTag *bb);
662 : static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
663 : static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg);
664 :
665 : static void BufferLockAcquire(Buffer buffer, BufferDesc *buf_hdr, BufferLockMode mode);
666 : static void BufferLockUnlock(Buffer buffer, BufferDesc *buf_hdr);
667 : static bool BufferLockConditional(Buffer buffer, BufferDesc *buf_hdr, BufferLockMode mode);
668 : static bool BufferLockHeldByMeInMode(BufferDesc *buf_hdr, BufferLockMode mode);
669 : static bool BufferLockHeldByMe(BufferDesc *buf_hdr);
670 : static inline void BufferLockDisown(Buffer buffer, BufferDesc *buf_hdr);
671 : static inline int BufferLockDisownInternal(Buffer buffer, BufferDesc *buf_hdr);
672 : static inline bool BufferLockAttempt(BufferDesc *buf_hdr, BufferLockMode mode);
673 : static void BufferLockQueueSelf(BufferDesc *buf_hdr, BufferLockMode mode);
674 : static void BufferLockDequeueSelf(BufferDesc *buf_hdr);
675 : static void BufferLockWakeup(BufferDesc *buf_hdr, bool unlocked);
676 : static void BufferLockProcessRelease(BufferDesc *buf_hdr, BufferLockMode mode, uint64 lockstate);
677 : static inline uint64 BufferLockReleaseSub(BufferLockMode mode);
678 :
679 :
680 : /*
681 : * Implementation of PrefetchBuffer() for shared buffers.
682 : */
683 : PrefetchBufferResult
684 32779 : PrefetchSharedBuffer(SMgrRelation smgr_reln,
685 : ForkNumber forkNum,
686 : BlockNumber blockNum)
687 : {
688 32779 : PrefetchBufferResult result = {InvalidBuffer, false};
689 : BufferTag newTag; /* identity of requested block */
690 : uint32 newHash; /* hash value for newTag */
691 : LWLock *newPartitionLock; /* buffer partition lock for it */
692 : int buf_id;
693 :
694 : Assert(BlockNumberIsValid(blockNum));
695 :
696 : /* create a tag so we can look up the buffer */
697 32779 : InitBufferTag(&newTag, &smgr_reln->smgr_rlocator.locator,
698 : forkNum, blockNum);
699 :
700 : /* determine its hash code and partition lock ID */
701 32779 : newHash = BufTableHashCode(&newTag);
702 32779 : newPartitionLock = BufMappingPartitionLock(newHash);
703 :
704 : /* see if the block is in the buffer pool already */
705 32779 : LWLockAcquire(newPartitionLock, LW_SHARED);
706 32779 : buf_id = BufTableLookup(&newTag, newHash);
707 32779 : LWLockRelease(newPartitionLock);
708 :
709 : /* If not in buffers, initiate prefetch */
710 32779 : if (buf_id < 0)
711 : {
712 : #ifdef USE_PREFETCH
713 : /*
714 : * Try to initiate an asynchronous read. This returns false in
715 : * recovery if the relation file doesn't exist.
716 : */
717 18031 : if ((io_direct_flags & IO_DIRECT_DATA) == 0 &&
718 8904 : smgrprefetch(smgr_reln, forkNum, blockNum, 1))
719 : {
720 8904 : result.initiated_io = true;
721 : }
722 : #endif /* USE_PREFETCH */
723 : }
724 : else
725 : {
726 : /*
727 : * Report the buffer it was in at that time. The caller may be able
728 : * to avoid a buffer table lookup, but it's not pinned and it must be
729 : * rechecked!
730 : */
731 23652 : result.recent_buffer = buf_id + 1; /* convert 0-based buf_id to 1-based Buffer */
732 : }
733 :
734 : /*
735 : * If the block *is* in buffers, we do nothing. This is not really ideal:
736 : * the block might be just about to be evicted, which would be stupid
737 : * since we know we are going to need it soon. But the only easy answer
738 : * is to bump the usage_count, which does not seem like a great solution:
739 : * when the caller does ultimately touch the block, usage_count would get
740 : * bumped again, resulting in too much favoritism for blocks that are
741 : * involved in a prefetch sequence. A real fix would involve some
742 : * additional per-buffer state, and it's not clear that there's enough of
743 : * a problem to justify that.
744 : */
745 :
746 32779 : return result;
747 : }
748 :
749 : /*
750 : * PrefetchBuffer -- initiate asynchronous read of a block of a relation
751 : *
752 : * This is named by analogy to ReadBuffer but doesn't actually allocate a
753 : * buffer. Instead it tries to ensure that a future ReadBuffer for the given
754 : * block will not be delayed by the I/O. Prefetching is optional.
755 : *
756 : * There are three possible outcomes:
757 : *
758 : * 1. If the block is already cached, the result includes a valid buffer that
759 : * could be used by the caller to avoid the need for a later buffer lookup, but
760 : * it's not pinned, so the caller must recheck it.
761 : *
762 : * 2. If the kernel has been asked to initiate I/O, the initiated_io member is
763 : * true. Currently there is no way to know if the data was already cached by
764 : * the kernel and therefore didn't really initiate I/O, and no way to know when
765 : * the I/O completes other than using synchronous ReadBuffer().
766 : *
767 : * 3. Otherwise, the buffer wasn't already cached by PostgreSQL, and
768 : * USE_PREFETCH is not defined (this build doesn't support prefetching due to
769 : * lack of a kernel facility), direct I/O is enabled, or the underlying
770 : * relation file wasn't found and we are in recovery. (If the relation file
771 : * wasn't found and we are not in recovery, an error is raised).
772 : */
773 : PrefetchBufferResult
774 22019 : PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
775 : {
776 : Assert(RelationIsValid(reln));
777 : Assert(BlockNumberIsValid(blockNum));
778 :
779 22019 : if (RelationUsesLocalBuffers(reln))
780 : {
781 : /* see comments in ReadBufferExtended; other sessions' temp rels are inaccessible */
782 783 : if (RELATION_IS_OTHER_TEMP(reln))
783 0 : ereport(ERROR,
784 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
785 : errmsg("cannot access temporary tables of other sessions")));
786 :
787 : /* pass it off to localbuf.c */
788 783 : return PrefetchLocalBuffer(RelationGetSmgr(reln), forkNum, blockNum);
789 : }
790 : else
791 : {
792 : /* pass it to the shared buffer version */
793 21236 : return PrefetchSharedBuffer(RelationGetSmgr(reln), forkNum, blockNum);
794 : }
795 : }
796 :
797 : /*
798 : * ReadRecentBuffer -- try to pin a block in a recently observed buffer
799 : *
800 : * Compared to ReadBuffer(), this avoids a buffer mapping lookup when it's
801 : * successful. Return true if the buffer is valid and still has the expected
802 : * tag. In that case, the buffer is pinned and the usage count is bumped.
803 : */
804 : bool
805 4544 : ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum,
806 : Buffer recent_buffer)
807 : {
808 : BufferDesc *bufHdr;
809 : BufferTag tag;
810 : uint64 buf_state;
811 :
812 : Assert(BufferIsValid(recent_buffer));
813 :
814 4544 : ResourceOwnerEnlarge(CurrentResourceOwner);
815 4544 : ReservePrivateRefCountEntry(); /* reserve tracking space before pinning */
816 4544 : InitBufferTag(&tag, &rlocator, forkNum, blockNum);
817 :
818 4544 : if (BufferIsLocal(recent_buffer))
819 : {
820 32 : int b = -recent_buffer - 1; /* local buffers use negative Buffer numbers */
821 :
822 32 : bufHdr = GetLocalBufferDescriptor(b);
823 32 : buf_state = pg_atomic_read_u64(&bufHdr->state);
824 :
825 : /* Is it still valid and holding the right tag? */
826 32 : if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
827 : {
828 32 : PinLocalBuffer(bufHdr, true);
829 :
830 32 : pgBufferUsage.local_blks_hit++;
831 :
832 32 : return true;
833 : }
834 : }
835 : else
836 : {
837 4512 : bufHdr = GetBufferDescriptor(recent_buffer - 1);
838 :
839 : /*
840 : * Is it still valid and holding the right tag? We do an unlocked tag
841 : * comparison first, to make it unlikely that we'll increment the
842 : * usage counter of the wrong buffer, if someone calls us with a very
843 : * out of date recent_buffer. Then we'll check it again if we get the
844 : * pin.
845 : */
846 8987 : if (BufferTagsEqual(&tag, &bufHdr->tag) &&
847 4475 : PinBuffer(bufHdr, NULL, true))
848 : {
849 4469 : if (BufferTagsEqual(&tag, &bufHdr->tag))
850 : {
851 4469 : pgBufferUsage.shared_blks_hit++;
852 4469 : return true;
853 : }
854 0 : UnpinBuffer(bufHdr); /* tag changed between pin and recheck: drop the pin */
855 : }
856 : }
857 :
858 43 : return false;
859 : }
860 :
861 : /*
862 : * ReadBuffer -- a shorthand for ReadBufferExtended, for reading from main
863 : * fork with RBM_NORMAL mode and default strategy.
864 : */
865 : Buffer
866 45332763 : ReadBuffer(Relation reln, BlockNumber blockNum)
867 : {
868 45332763 : return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
869 : }
870 :
871 : /*
872 : * ReadBufferExtended -- returns a buffer containing the requested
873 : * block of the requested relation. If the blknum
874 : * requested is P_NEW, extend the relation file and
875 : * allocate a new block. (Caller is responsible for
876 : * ensuring that only one backend tries to extend a
877 : * relation at the same time!)
878 : *
879 : * Returns: the buffer number for the buffer containing
880 : * the block read. The returned buffer has been pinned.
881 : * Does not return on error --- elog's instead.
882 : *
883 : * Assume when this function is called, that reln has been opened already.
884 : *
885 : * In RBM_NORMAL mode, the page is read from disk, and the page header is
886 : * validated. An error is thrown if the page header is not valid. (But
887 : * note that an all-zero page is considered "valid"; see
888 : * PageIsVerified().)
889 : *
890 : * RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not
891 : * valid, the page is zeroed instead of throwing an error. This is intended
892 : * for non-critical data, where the caller is prepared to repair errors.
893 : *
894 : * In RBM_ZERO_AND_LOCK mode, if the page isn't in buffer cache already, it's
895 : * filled with zeros instead of reading it from disk. Useful when the caller
896 : * is going to fill the page from scratch, since this saves I/O and avoids
897 : * unnecessary failure if the page-on-disk has corrupt page headers.
898 : * The page is returned locked to ensure that the caller has a chance to
899 : * initialize the page before it's made visible to others.
900 : * Caution: do not use this mode to read a page that is beyond the relation's
901 : * current physical EOF; that is likely to cause problems in md.c when
902 : * the page is modified and written out. P_NEW is OK, though.
903 : *
904 : * RBM_ZERO_AND_CLEANUP_LOCK is the same as RBM_ZERO_AND_LOCK, but acquires
905 : * a cleanup-strength lock on the page.
906 : *
907 : * RBM_NORMAL_NO_LOG mode is treated the same as RBM_NORMAL here.
908 : *
909 : * If strategy is not NULL, a nondefault buffer access strategy is used.
910 : * See buffer/README for details.
911 : */
912 : inline Buffer
913 54364958 : ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
914 : ReadBufferMode mode, BufferAccessStrategy strategy)
915 : {
916 : Buffer buf;
917 :
918 : /*
919 : * Reject attempts to read non-local temporary relations; we would be
920 : * likely to get wrong data since we have no visibility into the owning
921 : * session's local buffers.
922 : */
923 54364958 : if (RELATION_IS_OTHER_TEMP(reln))
924 0 : ereport(ERROR,
925 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
926 : errmsg("cannot access temporary tables of other sessions")));
927 :
928 : /*
929 : * Read the buffer, and update pgstat counters to reflect a cache hit or
930 : * miss.
931 : */
932 54364958 : buf = ReadBuffer_common(reln, RelationGetSmgr(reln), 0,
933 : forkNum, blockNum, mode, strategy);
934 :
935 54364935 : return buf;
936 : }
937 :
938 :
939 : /*
940 : * ReadBufferWithoutRelcache -- like ReadBufferExtended, but doesn't require
941 : * a relcache entry for the relation.
942 : *
943 : * Pass permanent = true for a RELPERSISTENCE_PERMANENT relation, and
944 : * permanent = false for a RELPERSISTENCE_UNLOGGED relation. This function
945 : * cannot be used for temporary relations (and making that work might be
946 : * difficult, unless we only want to read temporary relations for our own
947 : * ProcNumber).
948 : */
949 : Buffer
950 5805008 : ReadBufferWithoutRelcache(RelFileLocator rlocator, ForkNumber forkNum,
951 : BlockNumber blockNum, ReadBufferMode mode,
952 : BufferAccessStrategy strategy, bool permanent)
953 : {
954 5805008 : SMgrRelation smgr = smgropen(rlocator, INVALID_PROC_NUMBER);
955 :
956 5805008 : return ReadBuffer_common(NULL, smgr,
957 : permanent ? RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED,
958 : forkNum, blockNum,
959 : mode, strategy);
960 : }
961 :
962 : /*
963 : * Convenience wrapper around ExtendBufferedRelBy() extending by one block.
964 : */
965 : Buffer
966 46171 : ExtendBufferedRel(BufferManagerRelation bmr,
967 : ForkNumber forkNum,
968 : BufferAccessStrategy strategy,
969 : uint32 flags)
970 : {
971 : Buffer buf;
972 46171 : uint32 extend_by = 1;
973 :
974 46171 : ExtendBufferedRelBy(bmr, forkNum, strategy, flags, extend_by,
975 : &buf, &extend_by);
976 :
977 46171 : return buf;
978 : }
979 :
/*
 * Extend relation by multiple blocks.
 *
 * Tries to extend the relation by extend_by blocks.  Depending on the
 * availability of resources the relation may end up being extended by a
 * smaller number of pages (unless an error is thrown, always by at least one
 * page). *extended_by is updated to the number of pages the relation has been
 * extended to.
 *
 * buffers needs to be an array that is at least extend_by long.  Upon
 * completion, the first extend_by array elements will point to a pinned
 * buffer.
 *
 * If EB_LOCK_FIRST is part of flags, the first returned buffer is
 * locked.  This is useful for callers that want a buffer that is guaranteed to
 * be empty.
 *
 * Exactly one of bmr.rel / bmr.smgr must be set; when only bmr.smgr is
 * given, bmr.relpersistence must be supplied too.
 */
BlockNumber
ExtendBufferedRelBy(BufferManagerRelation bmr,
					ForkNumber fork,
					BufferAccessStrategy strategy,
					uint32 flags,
					uint32 extend_by,
					Buffer *buffers,
					uint32 *extended_by)
{
	Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
	Assert(bmr.smgr == NULL || bmr.relpersistence != '\0');
	Assert(extend_by > 0);

	/* Derive persistence from the relcache entry when not supplied. */
	if (bmr.relpersistence == '\0')
		bmr.relpersistence = bmr.rel->rd_rel->relpersistence;

	return ExtendBufferedRelCommon(bmr, fork, strategy, flags,
								   extend_by, InvalidBlockNumber,
								   buffers, extended_by);
}
1017 :
/*
 * Extend the relation so it is at least extend_to blocks large, return buffer
 * (extend_to - 1).
 *
 * This is useful for callers that want to write a specific page, regardless
 * of the current size of the relation (e.g. useful for visibilitymap and for
 * crash recovery).
 *
 * The relation may already be large enough, or be extended concurrently by
 * another backend; in that case the target block is simply read instead.
 * The returned buffer is pinned, and locked if mode requests it.
 */
Buffer
ExtendBufferedRelTo(BufferManagerRelation bmr,
					ForkNumber fork,
					BufferAccessStrategy strategy,
					uint32 flags,
					BlockNumber extend_to,
					ReadBufferMode mode)
{
	BlockNumber current_size;
	uint32		extended_by = 0;
	Buffer		buffer = InvalidBuffer;
	Buffer		buffers[64];		/* batch size for each extension round */

	Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
	Assert(bmr.smgr == NULL || bmr.relpersistence != '\0');
	Assert(extend_to != InvalidBlockNumber && extend_to > 0);

	if (bmr.relpersistence == '\0')
		bmr.relpersistence = bmr.rel->rd_rel->relpersistence;

	/*
	 * If desired, create the file if it doesn't exist.  If
	 * smgr_cached_nblocks[fork] is positive then it must exist, no need for
	 * an smgrexists call.
	 */
	if ((flags & EB_CREATE_FORK_IF_NEEDED) &&
		(BMR_GET_SMGR(bmr)->smgr_cached_nblocks[fork] == 0 ||
		 BMR_GET_SMGR(bmr)->smgr_cached_nblocks[fork] == InvalidBlockNumber) &&
		!smgrexists(BMR_GET_SMGR(bmr), fork))
	{
		LockRelationForExtension(bmr.rel, ExclusiveLock);

		/* recheck, fork might have been created concurrently */
		if (!smgrexists(BMR_GET_SMGR(bmr), fork))
			smgrcreate(BMR_GET_SMGR(bmr), fork, flags & EB_PERFORMING_RECOVERY);

		UnlockRelationForExtension(bmr.rel, ExclusiveLock);
	}

	/*
	 * If requested, invalidate size cache, so that smgrnblocks asks the
	 * kernel.
	 */
	if (flags & EB_CLEAR_SIZE_CACHE)
		BMR_GET_SMGR(bmr)->smgr_cached_nblocks[fork] = InvalidBlockNumber;

	/*
	 * Estimate how many pages we'll need to extend by.  This avoids acquiring
	 * unnecessarily many victim buffers.
	 */
	current_size = smgrnblocks(BMR_GET_SMGR(bmr), fork);

	/*
	 * Since no-one else can be looking at the page contents yet, there is no
	 * difference between an exclusive lock and a cleanup-strength lock.  Note
	 * that we pass the original mode to ReadBuffer_common() below, when
	 * falling back to reading the buffer to a concurrent relation extension.
	 */
	if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
		flags |= EB_LOCK_TARGET;

	/* Loop until the relation reaches the target size. */
	while (current_size < extend_to)
	{
		uint32		num_pages = lengthof(buffers);
		BlockNumber first_block;

		/* Don't overshoot the target; uint64 avoids wraparound in the sum. */
		if ((uint64) current_size + num_pages > extend_to)
			num_pages = extend_to - current_size;

		first_block = ExtendBufferedRelCommon(bmr, fork, strategy, flags,
											  num_pages, extend_to,
											  buffers, &extended_by);

		current_size = first_block + extended_by;
		Assert(num_pages != 0 || current_size >= extend_to);

		/* Keep only the target block's buffer; release the rest. */
		for (uint32 i = 0; i < extended_by; i++)
		{
			if (first_block + i != extend_to - 1)
				ReleaseBuffer(buffers[i]);
			else
				buffer = buffers[i];
		}
	}

	/*
	 * It's possible that another backend concurrently extended the relation.
	 * In that case read the buffer.
	 *
	 * XXX: Should we control this via a flag?
	 */
	if (buffer == InvalidBuffer)
	{
		Assert(extended_by == 0);
		buffer = ReadBuffer_common(bmr.rel, BMR_GET_SMGR(bmr), bmr.relpersistence,
								   fork, extend_to - 1, mode, strategy);
	}

	return buffer;
}
1126 :
/*
 * Lock and optionally zero a buffer, as part of the implementation of
 * RBM_ZERO_AND_LOCK or RBM_ZERO_AND_CLEANUP_LOCK.  The buffer must be already
 * pinned.  If the buffer is not already valid, it is zeroed and made valid.
 *
 * already_valid tells us whether the caller has already established that the
 * buffer is valid (a cache hit), which lets us skip the I/O-start protocol.
 */
static void
ZeroAndLockBuffer(Buffer buffer, ReadBufferMode mode, bool already_valid)
{
	BufferDesc *bufHdr;
	bool		need_to_zero;
	bool		isLocalBuf = BufferIsLocal(buffer);

	Assert(mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK);

	if (already_valid)
	{
		/*
		 * If the caller already knew the buffer was valid, we can skip some
		 * header interaction.  The caller just wants to lock the buffer.
		 */
		need_to_zero = false;
	}
	else if (isLocalBuf)
	{
		/* Simple case for non-shared buffers. */
		bufHdr = GetLocalBufferDescriptor(-buffer - 1);
		need_to_zero = StartLocalBufferIO(bufHdr, true, false);
	}
	else
	{
		/*
		 * Take BM_IO_IN_PROGRESS, or discover that BM_VALID has been set
		 * concurrently.  Even though we aren't doing I/O, that ensures that
		 * we don't zero a page that someone else has pinned.  An exclusive
		 * content lock wouldn't be enough, because readers are allowed to
		 * drop the content lock after determining that a tuple is visible
		 * (see buffer access rules in README).
		 */
		bufHdr = GetBufferDescriptor(buffer - 1);
		need_to_zero = StartBufferIO(bufHdr, true, false);
	}

	if (need_to_zero)
	{
		memset(BufferGetPage(buffer), 0, BLCKSZ);

		/*
		 * Grab the buffer content lock before marking the page as valid, to
		 * make sure that no other backend sees the zeroed page before the
		 * caller has had a chance to initialize it.
		 *
		 * Since no-one else can be looking at the page contents yet, there is
		 * no difference between an exclusive lock and a cleanup-strength
		 * lock.  (Note that we cannot use LockBuffer() or
		 * LockBufferForCleanup() here, because they assert that the buffer is
		 * already valid.)
		 */
		if (!isLocalBuf)
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

		/* Set BM_VALID, terminate IO, and wake up any waiters */
		if (isLocalBuf)
			TerminateLocalBufferIO(bufHdr, false, BM_VALID, false);
		else
			TerminateBufferIO(bufHdr, false, BM_VALID, true, false);
	}
	else if (!isLocalBuf)
	{
		/*
		 * The buffer is valid, so we can't zero it.  The caller still expects
		 * the page to be locked on return.
		 */
		if (mode == RBM_ZERO_AND_LOCK)
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		else
			LockBufferForCleanup(buffer);
	}
}
1205 :
/*
 * Pin a buffer for a given block.  *foundPtr is set to true if the block was
 * already present, or false if more work is required to either read it in or
 * zero it.
 *
 * rel may be NULL (e.g. when reading without a relcache entry); it is used
 * only for per-relation statistics.  persistence selects between local and
 * shared buffers, and must already be resolved by the caller.
 */
static pg_attribute_always_inline Buffer
PinBufferForBlock(Relation rel,
				  SMgrRelation smgr,
				  char persistence,
				  ForkNumber forkNum,
				  BlockNumber blockNum,
				  BufferAccessStrategy strategy,
				  bool *foundPtr)
{
	BufferDesc *bufHdr;
	IOContext	io_context;
	IOObject	io_object;

	Assert(blockNum != P_NEW);

	/* Persistence should be set before */
	Assert((persistence == RELPERSISTENCE_TEMP ||
			persistence == RELPERSISTENCE_PERMANENT ||
			persistence == RELPERSISTENCE_UNLOGGED));

	/* Classify the I/O for pg_stat_io style accounting. */
	if (persistence == RELPERSISTENCE_TEMP)
	{
		io_context = IOCONTEXT_NORMAL;
		io_object = IOOBJECT_TEMP_RELATION;
	}
	else
	{
		io_context = IOContextForStrategy(strategy);
		io_object = IOOBJECT_RELATION;
	}

	TRACE_POSTGRESQL_BUFFER_READ_START(forkNum, blockNum,
									   smgr->smgr_rlocator.locator.spcOid,
									   smgr->smgr_rlocator.locator.dbOid,
									   smgr->smgr_rlocator.locator.relNumber,
									   smgr->smgr_rlocator.backend);

	/* Temporary relations live in backend-local buffers. */
	if (persistence == RELPERSISTENCE_TEMP)
	{
		bufHdr = LocalBufferAlloc(smgr, forkNum, blockNum, foundPtr);
		if (*foundPtr)
			pgBufferUsage.local_blks_hit++;
	}
	else
	{
		bufHdr = BufferAlloc(smgr, persistence, forkNum, blockNum,
							 strategy, foundPtr, io_context);
		if (*foundPtr)
			pgBufferUsage.shared_blks_hit++;
	}
	if (rel)
	{
		/*
		 * While pgBufferUsage's "read" counter isn't bumped unless we reach
		 * WaitReadBuffers() (so, not for hits, and not for buffers that are
		 * zeroed instead), the per-relation stats always count them.
		 */
		pgstat_count_buffer_read(rel);
		if (*foundPtr)
			pgstat_count_buffer_hit(rel);
	}
	if (*foundPtr)
	{
		pgstat_count_io_op(io_object, io_context, IOOP_HIT, 1, 0);
		if (VacuumCostActive)
			VacuumCostBalance += VacuumCostPageHit;

		TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
										  smgr->smgr_rlocator.locator.spcOid,
										  smgr->smgr_rlocator.locator.dbOid,
										  smgr->smgr_rlocator.locator.relNumber,
										  smgr->smgr_rlocator.backend,
										  true);
	}

	return BufferDescriptorGetBuffer(bufHdr);
}
1288 :
1289 : /*
1290 : * ReadBuffer_common -- common logic for all ReadBuffer variants
1291 : *
1292 : * smgr is required, rel is optional unless using P_NEW.
1293 : */
1294 : static pg_attribute_always_inline Buffer
1295 60169972 : ReadBuffer_common(Relation rel, SMgrRelation smgr, char smgr_persistence,
1296 : ForkNumber forkNum,
1297 : BlockNumber blockNum, ReadBufferMode mode,
1298 : BufferAccessStrategy strategy)
1299 : {
1300 : ReadBuffersOperation operation;
1301 : Buffer buffer;
1302 : int flags;
1303 : char persistence;
1304 :
1305 : /*
1306 : * Backward compatibility path, most code should use ExtendBufferedRel()
1307 : * instead, as acquiring the extension lock inside ExtendBufferedRel()
1308 : * scales a lot better.
1309 : */
1310 60169972 : if (unlikely(blockNum == P_NEW))
1311 : {
1312 261 : uint32 flags = EB_SKIP_EXTENSION_LOCK;
1313 :
1314 : /*
1315 : * Since no-one else can be looking at the page contents yet, there is
1316 : * no difference between an exclusive lock and a cleanup-strength
1317 : * lock.
1318 : */
1319 261 : if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
1320 0 : flags |= EB_LOCK_FIRST;
1321 :
1322 261 : return ExtendBufferedRel(BMR_REL(rel), forkNum, strategy, flags);
1323 : }
1324 :
1325 60169711 : if (rel)
1326 54364703 : persistence = rel->rd_rel->relpersistence;
1327 : else
1328 5805008 : persistence = smgr_persistence;
1329 :
1330 60169711 : if (unlikely(mode == RBM_ZERO_AND_CLEANUP_LOCK ||
1331 : mode == RBM_ZERO_AND_LOCK))
1332 : {
1333 : bool found;
1334 :
1335 328779 : buffer = PinBufferForBlock(rel, smgr, persistence,
1336 : forkNum, blockNum, strategy, &found);
1337 328779 : ZeroAndLockBuffer(buffer, mode, found);
1338 328779 : return buffer;
1339 : }
1340 :
1341 : /*
1342 : * Signal that we are going to immediately wait. If we're immediately
1343 : * waiting, there is no benefit in actually executing the IO
1344 : * asynchronously, it would just add dispatch overhead.
1345 : */
1346 59840932 : flags = READ_BUFFERS_SYNCHRONOUSLY;
1347 59840932 : if (mode == RBM_ZERO_ON_ERROR)
1348 1511036 : flags |= READ_BUFFERS_ZERO_ON_ERROR;
1349 59840932 : operation.smgr = smgr;
1350 59840932 : operation.rel = rel;
1351 59840932 : operation.persistence = persistence;
1352 59840932 : operation.forknum = forkNum;
1353 59840932 : operation.strategy = strategy;
1354 59840932 : if (StartReadBuffer(&operation,
1355 : &buffer,
1356 : blockNum,
1357 : flags))
1358 707104 : WaitReadBuffers(&operation);
1359 :
1360 59840909 : return buffer;
1361 : }
1362 :
/*
 * Common implementation for StartReadBuffers() and StartReadBuffer().
 *
 * Pins up to *nblocks buffers starting at blockNum and decides whether any
 * I/O must be performed.  Returns true if an I/O was started (or, for
 * IOMETHOD_SYNC, must be performed by WaitReadBuffers()); false if all
 * requested blocks covered by *nblocks on exit were already valid.
 * allow_forwarding enables the multi-block "forwarded buffer" protocol; the
 * single-block entry point passes false so the loop specializes away.
 */
static pg_attribute_always_inline bool
StartReadBuffersImpl(ReadBuffersOperation *operation,
					 Buffer *buffers,
					 BlockNumber blockNum,
					 int *nblocks,
					 int flags,
					 bool allow_forwarding)
{
	int			actual_nblocks = *nblocks;
	int			maxcombine = 0;
	bool		did_start_io;

	Assert(*nblocks == 1 || allow_forwarding);
	Assert(*nblocks > 0);
	Assert(*nblocks <= MAX_IO_COMBINE_LIMIT);

	for (int i = 0; i < actual_nblocks; ++i)
	{
		bool		found;

		if (allow_forwarding && buffers[i] != InvalidBuffer)
		{
			BufferDesc *bufHdr;

			/*
			 * This is a buffer that was pinned by an earlier call to
			 * StartReadBuffers(), but couldn't be handled in one operation at
			 * that time.  The operation was split, and the caller has passed
			 * an already pinned buffer back to us to handle the rest of the
			 * operation.  It must continue at the expected block number.
			 */
			Assert(BufferGetBlockNumber(buffers[i]) == blockNum + i);

			/*
			 * It might be an already valid buffer (a hit) that followed the
			 * final contiguous block of an earlier I/O (a miss) marking the
			 * end of it, or a buffer that some other backend has since made
			 * valid by performing the I/O for us, in which case we can handle
			 * it as a hit now.  It is safe to check for a BM_VALID flag with
			 * a relaxed load, because we got a fresh view of it while pinning
			 * it in the previous call.
			 *
			 * On the other hand if we don't see BM_VALID yet, it must be an
			 * I/O that was split by the previous call and we need to try to
			 * start a new I/O from this block.  We're also racing against any
			 * other backend that might start the I/O or even manage to mark
			 * it BM_VALID after this check, but StartBufferIO() will handle
			 * those cases.
			 */
			if (BufferIsLocal(buffers[i]))
				bufHdr = GetLocalBufferDescriptor(-buffers[i] - 1);
			else
				bufHdr = GetBufferDescriptor(buffers[i] - 1);
			Assert(pg_atomic_read_u64(&bufHdr->state) & BM_TAG_VALID);
			found = pg_atomic_read_u64(&bufHdr->state) & BM_VALID;
		}
		else
		{
			/* No forwarded buffer: pin the block ourselves. */
			buffers[i] = PinBufferForBlock(operation->rel,
										   operation->smgr,
										   operation->persistence,
										   operation->forknum,
										   blockNum + i,
										   operation->strategy,
										   &found);
		}

		if (found)
		{
			/*
			 * We have a hit.  If it's the first block in the requested range,
			 * we can return it immediately and report that WaitReadBuffers()
			 * does not need to be called.  If the initial value of *nblocks
			 * was larger, the caller will have to call again for the rest.
			 */
			if (i == 0)
			{
				*nblocks = 1;

#ifdef USE_ASSERT_CHECKING

				/*
				 * Initialize enough of ReadBuffersOperation to make
				 * CheckReadBuffersOperation() work.  Outside of assertions
				 * that's not necessary when no IO is issued.
				 */
				operation->buffers = buffers;
				operation->blocknum = blockNum;
				operation->nblocks = 1;
				operation->nblocks_done = 1;
				CheckReadBuffersOperation(operation, true);
#endif
				return false;
			}

			/*
			 * Otherwise we already have an I/O to perform, but this block
			 * can't be included as it is already valid.  Split the I/O here.
			 * There may or may not be more blocks requiring I/O after this
			 * one, we haven't checked, but they can't be contiguous with this
			 * one in the way.  We'll leave this buffer pinned, forwarding it
			 * to the next call, avoiding the need to unpin it here and re-pin
			 * it in the next call.
			 */
			actual_nblocks = i;
			break;
		}
		else
		{
			/*
			 * Check how many blocks we can cover with the same IO.  The smgr
			 * implementation might e.g. be limited due to a segment boundary.
			 */
			if (i == 0 && actual_nblocks > 1)
			{
				maxcombine = smgrmaxcombine(operation->smgr,
											operation->forknum,
											blockNum);
				if (unlikely(maxcombine < actual_nblocks))
				{
					elog(DEBUG2, "limiting nblocks at %u from %u to %u",
						 blockNum, actual_nblocks, maxcombine);
					actual_nblocks = maxcombine;
				}
			}
		}
	}
	*nblocks = actual_nblocks;

	/* Populate information needed for I/O. */
	operation->buffers = buffers;
	operation->blocknum = blockNum;
	operation->flags = flags;
	operation->nblocks = actual_nblocks;
	operation->nblocks_done = 0;
	pgaio_wref_clear(&operation->io_wref);

	/*
	 * When using AIO, start the IO in the background.  If not, issue prefetch
	 * requests if desired by the caller.
	 *
	 * The reason we have a dedicated path for IOMETHOD_SYNC here is to
	 * de-risk the introduction of AIO somewhat.  It's a large architectural
	 * change, with lots of chances for unanticipated performance effects.
	 *
	 * Use of IOMETHOD_SYNC already leads to not actually performing IO
	 * asynchronously, but without the check here we'd execute IO earlier than
	 * we used to.  Eventually this IOMETHOD_SYNC specific path should go away.
	 */
	if (io_method != IOMETHOD_SYNC)
	{
		/*
		 * Try to start IO asynchronously.  It's possible that no IO needs to
		 * be started, if another backend already performed the IO.
		 *
		 * Note that if an IO is started, it might not cover the entire
		 * requested range, e.g. because an intermediary block has been read
		 * in by another backend.  In that case any "trailing" buffers we
		 * already pinned above will be "forwarded" by read_stream.c to the
		 * next call to StartReadBuffers().
		 *
		 * This is signalled to the caller by decrementing *nblocks *and*
		 * reducing operation->nblocks.  The latter is done here, but not below
		 * WaitReadBuffers(), as in WaitReadBuffers() we can't "shorten" the
		 * overall read size anymore, we need to retry until done in its
		 * entirety or until failed.
		 */
		did_start_io = AsyncReadBuffers(operation, nblocks);

		operation->nblocks = *nblocks;
	}
	else
	{
		operation->flags |= READ_BUFFERS_SYNCHRONOUSLY;

		if (flags & READ_BUFFERS_ISSUE_ADVICE)
		{
			/*
			 * In theory we should only do this if PinBufferForBlock() had to
			 * allocate new buffers above.  That way, if two calls to
			 * StartReadBuffers() were made for the same blocks before
			 * WaitReadBuffers(), only the first would issue the advice.
			 * That'd be a better simulation of true asynchronous I/O, which
			 * would only start the I/O once, but isn't done here for
			 * simplicity.
			 */
			smgrprefetch(operation->smgr,
						 operation->forknum,
						 blockNum,
						 actual_nblocks);
		}

		/*
		 * Indicate that WaitReadBuffers() should be called. WaitReadBuffers()
		 * will initiate the necessary IO.
		 */
		did_start_io = true;
	}

	CheckReadBuffersOperation(operation, !did_start_io);

	return did_start_io;
}
1566 :
1567 : /*
1568 : * Begin reading a range of blocks beginning at blockNum and extending for
1569 : * *nblocks. *nblocks and the buffers array are in/out parameters. On entry,
1570 : * the buffers elements covered by *nblocks must hold either InvalidBuffer or
1571 : * buffers forwarded by an earlier call to StartReadBuffers() that was split
1572 : * and is now being continued. On return, *nblocks holds the number of blocks
1573 : * accepted by this operation. If it is less than the original number then
1574 : * this operation has been split, but buffer elements up to the original
1575 : * requested size may hold forwarded buffers to be used for a continuing
1576 : * operation. The caller must either start a new I/O beginning at the block
1577 : * immediately following the blocks accepted by this call and pass those
1578 : * buffers back in, or release them if it chooses not to. It shouldn't make
1579 : * any other use of or assumptions about forwarded buffers.
1580 : *
1581 : * If false is returned, no I/O is necessary and the buffers covered by
1582 : * *nblocks on exit are valid and ready to be accessed. If true is returned,
1583 : * an I/O has been started, and WaitReadBuffers() must be called with the same
1584 : * operation object before the buffers covered by *nblocks on exit can be
1585 : * accessed. Along with the operation object, the caller-supplied array of
1586 : * buffers must remain valid until WaitReadBuffers() is called, and any
1587 : * forwarded buffers must also be preserved for a continuing call unless
1588 : * they are explicitly released.
1589 : */
1590 : bool
1591 1999800 : StartReadBuffers(ReadBuffersOperation *operation,
1592 : Buffer *buffers,
1593 : BlockNumber blockNum,
1594 : int *nblocks,
1595 : int flags)
1596 : {
1597 1999800 : return StartReadBuffersImpl(operation, buffers, blockNum, nblocks, flags,
1598 : true /* expect forwarded buffers */ );
1599 : }
1600 :
1601 : /*
1602 : * Single block version of the StartReadBuffers(). This might save a few
1603 : * instructions when called from another translation unit, because it is
1604 : * specialized for nblocks == 1.
1605 : *
1606 : * This version does not support "forwarded" buffers: they cannot be created
1607 : * by reading only one block and *buffer is ignored on entry.
1608 : */
1609 : bool
1610 61949420 : StartReadBuffer(ReadBuffersOperation *operation,
1611 : Buffer *buffer,
1612 : BlockNumber blocknum,
1613 : int flags)
1614 : {
1615 61949420 : int nblocks = 1;
1616 : bool result;
1617 :
1618 61949420 : result = StartReadBuffersImpl(operation, buffer, blocknum, &nblocks, flags,
1619 : false /* single block, no forwarding */ );
1620 : Assert(nblocks == 1); /* single block can't be short */
1621 :
1622 61949405 : return result;
1623 : }
1624 :
1625 : /*
1626 : * Perform sanity checks on the ReadBuffersOperation.
1627 : */
1628 : static void
1629 3956130 : CheckReadBuffersOperation(ReadBuffersOperation *operation, bool is_complete)
1630 : {
1631 : #ifdef USE_ASSERT_CHECKING
1632 : Assert(operation->nblocks_done <= operation->nblocks);
1633 : Assert(!is_complete || operation->nblocks == operation->nblocks_done);
1634 :
1635 : for (int i = 0; i < operation->nblocks; i++)
1636 : {
1637 : Buffer buffer = operation->buffers[i];
1638 : BufferDesc *buf_hdr = BufferIsLocal(buffer) ?
1639 : GetLocalBufferDescriptor(-buffer - 1) :
1640 : GetBufferDescriptor(buffer - 1);
1641 :
1642 : Assert(BufferGetBlockNumber(buffer) == operation->blocknum + i);
1643 : Assert(pg_atomic_read_u64(&buf_hdr->state) & BM_TAG_VALID);
1644 :
1645 : if (i < operation->nblocks_done)
1646 : Assert(pg_atomic_read_u64(&buf_hdr->state) & BM_VALID);
1647 : }
1648 : #endif
1649 3956130 : }
1650 :
1651 : /* helper for ReadBuffersCanStartIO(), to avoid repetition */
1652 : static inline bool
1653 1493591 : ReadBuffersCanStartIOOnce(Buffer buffer, bool nowait)
1654 : {
1655 1493591 : if (BufferIsLocal(buffer))
1656 8367 : return StartLocalBufferIO(GetLocalBufferDescriptor(-buffer - 1),
1657 : true, nowait);
1658 : else
1659 1485224 : return StartBufferIO(GetBufferDescriptor(buffer - 1), true, nowait);
1660 : }
1661 :
1662 : /*
1663 : * Helper for AsyncReadBuffers that tries to get the buffer ready for IO.
1664 : */
1665 : static inline bool
1666 1493591 : ReadBuffersCanStartIO(Buffer buffer, bool nowait)
1667 : {
1668 : /*
1669 : * If this backend currently has staged IO, we need to submit the pending
1670 : * IO before waiting for the right to issue IO, to avoid the potential for
1671 : * deadlocks (and, more commonly, unnecessary delays for other backends).
1672 : */
1673 1493591 : if (!nowait && pgaio_have_staged())
1674 : {
1675 534 : if (ReadBuffersCanStartIOOnce(buffer, true))
1676 534 : return true;
1677 :
1678 : /*
1679 : * Unfortunately StartBufferIO() returning false doesn't allow to
1680 : * distinguish between the buffer already being valid and IO already
1681 : * being in progress. Since IO already being in progress is quite
1682 : * rare, this approach seems fine.
1683 : */
1684 0 : pgaio_submit_staged();
1685 : }
1686 :
1687 1493057 : return ReadBuffersCanStartIOOnce(buffer, nowait);
1688 : }
1689 :
/*
 * Helper for WaitReadBuffers() that processes the results of a readv
 * operation, raising an error if necessary.
 *
 * On success (including warnings and partial reads) advances
 * operation->nblocks_done by the number of blocks the IO completed; a
 * PGAIO_RS_ERROR result is reported at ERROR level and does not return.
 */
static void
ProcessReadBuffersResult(ReadBuffersOperation *operation)
{
	PgAioReturn *aio_ret = &operation->io_return;
	PgAioResultStatus rs = aio_ret->result.status;
	int			newly_read_blocks = 0;

	Assert(pgaio_wref_valid(&operation->io_wref));
	Assert(aio_ret->result.status != PGAIO_RS_UNKNOWN);

	/*
	 * SMGR reports the number of blocks successfully read as the result of
	 * the IO operation. Thus we can simply add that to ->nblocks_done.
	 */

	if (likely(rs != PGAIO_RS_ERROR))
		newly_read_blocks = aio_ret->result.result;

	/* Errors are raised here (ERROR does not return); warnings are logged. */
	if (rs == PGAIO_RS_ERROR || rs == PGAIO_RS_WARNING)
		pgaio_result_report(aio_ret->result, &aio_ret->target_data,
							rs == PGAIO_RS_ERROR ? ERROR : WARNING);
	else if (aio_ret->result.status == PGAIO_RS_PARTIAL)
	{
		/*
		 * We'll retry, so we just emit a debug message to the server log (or
		 * not even that in prod scenarios).
		 */
		pgaio_result_report(aio_ret->result, &aio_ret->target_data, DEBUG1);
		elog(DEBUG3, "partial read, will retry");
	}

	Assert(newly_read_blocks > 0);
	Assert(newly_read_blocks <= MAX_IO_COMBINE_LIMIT);

	operation->nblocks_done += newly_read_blocks;

	Assert(operation->nblocks_done <= operation->nblocks);
}
1732 :
     : /*
     :  * Wait for the IO of a ReadBuffersOperation to complete, retrying (or, for
     :  * io_method == IOMETHOD_SYNC, issuing) IO until every block covered by the
     :  * operation has been read in.
     :  */
1733 : void
1734 1317404 : WaitReadBuffers(ReadBuffersOperation *operation)
1735 : {
1736 1317404 : PgAioReturn *aio_ret = &operation->io_return;
1737 : IOContext io_context;
1738 : IOObject io_object;
1739 :
1740 1317404 : if (operation->persistence == RELPERSISTENCE_TEMP)
1741 : {
1742 1490 : io_context = IOCONTEXT_NORMAL;
1743 1490 : io_object = IOOBJECT_TEMP_RELATION;
1744 : }
1745 : else
1746 : {
1747 1315914 : io_context = IOContextForStrategy(operation->strategy);
1748 1315914 : io_object = IOOBJECT_RELATION;
1749 : }
1750 :
1751 : /*
1752 : * If we get here without an IO operation having been issued, the
1753 : * io_method == IOMETHOD_SYNC path must have been used. Otherwise the
1754 : * caller should not have called WaitReadBuffers().
1755 : *
1756 : * In the case of IOMETHOD_SYNC, we start - as we used to before the
1757 : * introducing of AIO - the IO in WaitReadBuffers(). This is done as part
1758 : * of the retry logic below, no extra code is required.
1759 : *
1760 : * This path is expected to eventually go away.
1761 : */
1762 1317404 : if (!pgaio_wref_valid(&operation->io_wref) && io_method != IOMETHOD_SYNC)
1763 0 : elog(ERROR, "waiting for read operation that didn't read");
1764 :
1765 : /*
1766 : * To handle partial reads, and IOMETHOD_SYNC, we re-issue IO until we're
1767 : * done. We may need multiple retries, not just because we could get
1768 : * multiple partial reads, but also because some of the remaining
1769 : * to-be-read buffers may have been read in by other backends, limiting
1770 : * the IO size.
1771 : */
1772 : while (true)
1773 1079 : {
     : /* progress is tracked via operation->nblocks_done, not this out-arg */
1774 : int ignored_nblocks_progress;
1775 :
1776 1318483 : CheckReadBuffersOperation(operation, false);
1777 :
1778 : /*
1779 : * If there is an IO associated with the operation, we may need to
1780 : * wait for it.
1781 : */
1782 1318483 : if (pgaio_wref_valid(&operation->io_wref))
1783 : {
1784 : /*
1785 : * Track the time spent waiting for the IO to complete. As
1786 : * tracking a wait even if we don't actually need to wait
1787 : *
1788 : * a) is not cheap, due to the timestamping overhead
1789 : *
1790 : * b) reports some time as waiting, even if we never waited
1791 : *
1792 : * we first check if we already know the IO is complete.
1793 : */
1794 1317414 : if (aio_ret->result.status == PGAIO_RS_UNKNOWN &&
1795 602191 : !pgaio_wref_check_done(&operation->io_wref))
1796 : {
1797 287828 : instr_time io_start = pgstat_prepare_io_time(track_io_timing);
1798 :
1799 287828 : pgaio_wref_wait(&operation->io_wref);
1800 :
1801 : /*
1802 : * The IO operation itself was already counted earlier, in
1803 : * AsyncReadBuffers(), this just accounts for the wait time.
1804 : */
1805 287827 : pgstat_count_io_op_time(io_object, io_context, IOOP_READ,
1806 : io_start, 0, 0);
1807 : }
1808 : else
1809 : {
1810 : Assert(pgaio_wref_check_done(&operation->io_wref));
1811 : }
1812 :
1813 : /*
1814 : * We now are sure the IO completed. Check the results. This
1815 : * includes reporting on errors if there were any.
1816 : */
1817 1317413 : ProcessReadBuffersResult(operation);
1818 : }
1819 :
1820 : /*
1821 : * Most of the time, the one IO we already started, will read in
1822 : * everything. But we need to deal with partial reads and buffers not
1823 : * needing IO anymore.
1824 : */
1825 1318453 : if (operation->nblocks_done == operation->nblocks)
1826 1317374 : break;
1827 :
1828 1079 : CHECK_FOR_INTERRUPTS();
1829 :
1830 : /*
1831 : * This may only complete the IO partially, either because some
1832 : * buffers were already valid, or because of a partial read.
1833 : *
1834 : * NB: In contrast to after the AsyncReadBuffers() call in
1835 : * StartReadBuffers(), we do *not* reduce
1836 : * ReadBuffersOperation->nblocks here, callers expect the full
1837 : * operation to be completed at this point (as more operations may
1838 : * have been queued).
1839 : */
1840 1079 : AsyncReadBuffers(operation, &ignored_nblocks_progress);
1841 : }
1842 :
1843 1317374 : CheckReadBuffersOperation(operation, true);
1844 :
1845 : /* NB: READ_DONE tracepoint was already executed in completion callback */
1846 1317374 : }
1847 :
1848 : /*
1849 : * Initiate IO for the ReadBuffersOperation
1850 : *
1851 : * This function only starts a single IO at a time. The size of the IO may be
1852 : * limited to below the to-be-read blocks, if one of the buffers has
1853 : * concurrently been read in. If the first to-be-read buffer is already valid,
1854 : * no IO will be issued.
1855 : *
1856 : * To support retries after partial reads, the first operation->nblocks_done
1857 : * buffers are skipped.
1858 : *
1859 : * On return *nblocks_progress is updated to reflect the number of buffers
1860 : * affected by the call. If the first buffer is valid, *nblocks_progress is
1861 : * set to 1 and operation->nblocks_done is incremented.
1862 : *
1863 : * Returns true if IO was initiated, false if no IO was necessary.
1864 : */
1865 : static bool
1866 1320298 : AsyncReadBuffers(ReadBuffersOperation *operation, int *nblocks_progress)
1867 : {
1868 1320298 : Buffer *buffers = &operation->buffers[0];
1869 1320298 : int flags = operation->flags;
1870 1320298 : BlockNumber blocknum = operation->blocknum;
1871 1320298 : ForkNumber forknum = operation->forknum;
1872 1320298 : char persistence = operation->persistence;
1873 1320298 : int16 nblocks_done = operation->nblocks_done;
1874 1320298 : Buffer *io_buffers = &operation->buffers[nblocks_done];
1875 1320298 : int io_buffers_len = 0;
1876 : PgAioHandle *ioh;
1877 1320298 : uint32 ioh_flags = 0;
1878 : void *io_pages[MAX_IO_COMBINE_LIMIT];
1879 : IOContext io_context;
1880 : IOObject io_object;
1881 : bool did_start_io;
1882 :
1883 : /*
1884 : * When this IO is executed synchronously, either because the caller will
1885 : * immediately block waiting for the IO or because IOMETHOD_SYNC is used,
1886 : * the AIO subsystem needs to know.
1887 : */
1888 1320298 : if (flags & READ_BUFFERS_SYNCHRONOUSLY)
1889 708766 : ioh_flags |= PGAIO_HF_SYNCHRONOUS;
1890 :
1891 1320298 : if (persistence == RELPERSISTENCE_TEMP)
1892 : {
1893 1784 : io_context = IOCONTEXT_NORMAL;
1894 1784 : io_object = IOOBJECT_TEMP_RELATION;
1895 1784 : ioh_flags |= PGAIO_HF_REFERENCES_LOCAL;
1896 : }
1897 : else
1898 : {
1899 1318514 : io_context = IOContextForStrategy(operation->strategy);
1900 1318514 : io_object = IOOBJECT_RELATION;
1901 : }
1902 :
1903 : /*
1904 : * If zero_damaged_pages is enabled, add the READ_BUFFERS_ZERO_ON_ERROR
1905 : * flag. The reason for that is that, hopefully, zero_damaged_pages isn't
1906 : * set globally, but on a per-session basis. The completion callback,
1907 : * which may be run in other processes, e.g. in IO workers, may have a
1908 : * different value of the zero_damaged_pages GUC.
1909 : *
1910 : * XXX: We probably should eventually use a different flag for
1911 : * zero_damaged_pages, so we can report different log levels / error codes
1912 : * for zero_damaged_pages and ZERO_ON_ERROR.
1913 : */
1914 1320298 : if (zero_damaged_pages)
1915 16 : flags |= READ_BUFFERS_ZERO_ON_ERROR;
1916 :
1917 : /*
1918 : * For the same reason as with zero_damaged_pages we need to use this
1919 : * backend's ignore_checksum_failure value.
1920 : */
1921 1320298 : if (ignore_checksum_failure)
1922 8 : flags |= READ_BUFFERS_IGNORE_CHECKSUM_FAILURES;
1923 :
1924 :
1925 : /*
1926 : * To be allowed to report stats in the local completion callback we need
1927 : * to prepare to report stats now. This ensures we can safely report the
1928 : * checksum failure even in a critical section.
1929 : */
1930 1320298 : pgstat_prepare_report_checksum_failure(operation->smgr->smgr_rlocator.locator.dbOid);
1931 :
1932 : /*
1933 : * Get IO handle before ReadBuffersCanStartIO(), as pgaio_io_acquire()
1934 : * might block, which we don't want after setting IO_IN_PROGRESS.
1935 : *
1936 : * If we need to wait for IO before we can get a handle, submit
1937 : * already-staged IO first, so that other backends don't need to wait.
1938 : * There wouldn't be a deadlock risk, as pgaio_io_acquire() just needs to
1939 : * wait for already submitted IO, which doesn't require additional locks,
1940 : * but it could still cause undesirable waits.
1941 : *
1942 : * A secondary benefit is that this would allow us to measure the time in
1943 : * pgaio_io_acquire() without causing undue timer overhead in the common,
1944 : * non-blocking, case. However, currently the pgstats infrastructure
1945 : * doesn't really allow that, as it a) asserts that an operation can't
1946 : * have time without operations b) doesn't have an API to report
1947 : * "accumulated" time.
1948 : */
1949 1320298 : ioh = pgaio_io_acquire_nb(CurrentResourceOwner, &operation->io_return);
1950 1320298 : if (unlikely(!ioh))
1951 : {
1952 3132 : pgaio_submit_staged();
1953 :
1954 3132 : ioh = pgaio_io_acquire(CurrentResourceOwner, &operation->io_return);
1955 : }
1956 :
1957 : /*
1958 : * Check if we can start IO on the first to-be-read buffer.
1959 : *
1960 : * If an I/O is already in progress in another backend, we want to wait
1961 : * for the outcome: either done, or something went wrong and we will
1962 : * retry.
1963 : */
1964 1320298 : if (!ReadBuffersCanStartIO(buffers[nblocks_done], false))
1965 : {
1966 : /*
1967 : * Someone else has already completed this block, we're done.
1968 : *
1969 : * When IO is necessary, ->nblocks_done is updated in
1970 : * ProcessReadBuffersResult(), but that is not called if no IO is
1971 : * necessary. Thus update here.
1972 : */
1973 2570 : operation->nblocks_done += 1;
1974 2570 : *nblocks_progress = 1;
1975 :
1976 2570 : pgaio_io_release(ioh);
1977 2570 : pgaio_wref_clear(&operation->io_wref);
1978 2570 : did_start_io = false;
1979 :
1980 : /*
1981 : * Report and track this as a 'hit' for this backend, even though it
1982 : * must have started out as a miss in PinBufferForBlock(). The other
1983 : * backend will track this as a 'read'.
     :  *
     :  * NOTE(review): nblocks_done was already incremented above, so the
     :  * block number passed here is one past the block just found valid --
     :  * confirm this matches the tracepoint's intended contract.
1984 : */
1985 : TRACE_POSTGRESQL_BUFFER_READ_DONE(forknum, blocknum + operation->nblocks_done,
1986 : operation->smgr->smgr_rlocator.locator.spcOid,
1987 : operation->smgr->smgr_rlocator.locator.dbOid,
1988 : operation->smgr->smgr_rlocator.locator.relNumber,
1989 : operation->smgr->smgr_rlocator.backend,
1990 : true);
1991 :
1992 2570 : if (persistence == RELPERSISTENCE_TEMP)
1993 0 : pgBufferUsage.local_blks_hit += 1;
1994 : else
1995 2570 : pgBufferUsage.shared_blks_hit += 1;
1996 :
1997 2570 : if (operation->rel)
1998 2570 : pgstat_count_buffer_hit(operation->rel);
1999 :
2000 2570 : pgstat_count_io_op(io_object, io_context, IOOP_HIT, 1, 0);
2001 :
2002 2570 : if (VacuumCostActive)
2003 9 : VacuumCostBalance += VacuumCostPageHit;
2004 : }
2005 : else
2006 : {
2007 : instr_time io_start;
2008 :
2009 : /* We found a buffer that we need to read in. */
2010 : Assert(io_buffers[0] == buffers[nblocks_done]);
2011 1317728 : io_pages[0] = BufferGetBlock(buffers[nblocks_done]);
2012 1317728 : io_buffers_len = 1;
2013 :
2014 : /*
2015 : * How many neighboring-on-disk blocks can we scatter-read into other
2016 : * buffers at the same time? In this case we don't wait if we see an
2017 : * I/O already in progress. We already set BM_IO_IN_PROGRESS for the
2018 : * head block, so we should get on with that I/O as soon as possible.
2019 : */
2020 1491021 : for (int i = nblocks_done + 1; i < operation->nblocks; i++)
2021 : {
2022 173293 : if (!ReadBuffersCanStartIO(buffers[i], true))
2023 0 : break;
2024 : /* Must be consecutive block numbers. */
2025 : Assert(BufferGetBlockNumber(buffers[i - 1]) ==
2026 : BufferGetBlockNumber(buffers[i]) - 1);
2027 : Assert(io_buffers[io_buffers_len] == buffers[i]);
2028 :
2029 173293 : io_pages[io_buffers_len++] = BufferGetBlock(buffers[i]);
2030 : }
2031 :
2032 : /* get a reference to wait for in WaitReadBuffers() */
2033 1317728 : pgaio_io_get_wref(ioh, &operation->io_wref);
2034 :
2035 : /* provide the list of buffers to the completion callbacks */
2036 1317728 : pgaio_io_set_handle_data_32(ioh, (uint32 *) io_buffers, io_buffers_len);
2037 :
2038 1317728 : pgaio_io_register_callbacks(ioh,
2039 : persistence == RELPERSISTENCE_TEMP ?
2040 : PGAIO_HCB_LOCAL_BUFFER_READV :
2041 : PGAIO_HCB_SHARED_BUFFER_READV,
2042 : flags);
2043 :
2044 1317728 : pgaio_io_set_flag(ioh, ioh_flags);
2045 :
2046 : /* ---
2047 : * Even though we're trying to issue IO asynchronously, track the time
2048 : * in smgrstartreadv():
2049 : * - if io_method == IOMETHOD_SYNC, we will always perform the IO
2050 : * immediately
2051 : * - the io method might not support the IO (e.g. worker IO for a temp
2052 : * table)
2053 : * ---
2054 : */
2055 1317728 : io_start = pgstat_prepare_io_time(track_io_timing);
2056 1317728 : smgrstartreadv(ioh, operation->smgr, forknum,
2057 : blocknum + nblocks_done,
2058 : io_pages, io_buffers_len);
2059 1317713 : pgstat_count_io_op_time(io_object, io_context, IOOP_READ,
2060 1317713 : io_start, 1, io_buffers_len * BLCKSZ);
2061 :
2062 1317713 : if (persistence == RELPERSISTENCE_TEMP)
2063 1784 : pgBufferUsage.local_blks_read += io_buffers_len;
2064 : else
2065 1315929 : pgBufferUsage.shared_blks_read += io_buffers_len;
2066 :
2067 : /*
2068 : * Track vacuum cost when issuing IO, not after waiting for it.
2069 : * Otherwise we could end up issuing a lot of IO in a short timespan,
2070 : * despite a low cost limit.
2071 : */
2072 1317713 : if (VacuumCostActive)
2073 23475 : VacuumCostBalance += VacuumCostPageMiss * io_buffers_len;
2074 :
2075 1317713 : *nblocks_progress = io_buffers_len;
2076 1317713 : did_start_io = true;
2077 : }
2078 :
2079 1320283 : return did_start_io;
2080 : }
2081 :
2082 : /*
2083 : * BufferAlloc -- subroutine for PinBufferForBlock. Handles lookup of a shared
2084 : * buffer. If no buffer exists already, selects a replacement victim and
2085 : * evicts the old page, but does NOT read in new page.
2086 : *
2087 : * "strategy" can be a buffer replacement strategy object, or NULL for
2088 : * the default strategy. The selected buffer's usage_count is advanced when
2089 : * using the default strategy, but otherwise possibly not (see PinBuffer).
2090 : *
2091 : * The returned buffer is pinned and is already marked as holding the
2092 : * desired page. If it already did have the desired page, *foundPtr is
2093 : * set true. Otherwise, *foundPtr is set false.
2094 : *
2095 : * io_context is passed as an output parameter to avoid calling
2096 : * IOContextForStrategy() when there is a shared buffers hit and no IO
2097 : * statistics need be captured.
2098 : *
2099 : * No locks are held either at entry or exit.
2100 : */
2101 : static pg_attribute_always_inline BufferDesc *
2102 63170624 : BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
2103 : BlockNumber blockNum,
2104 : BufferAccessStrategy strategy,
2105 : bool *foundPtr, IOContext io_context)
2106 : {
2107 : BufferTag newTag; /* identity of requested block */
2108 : uint32 newHash; /* hash value for newTag */
2109 : LWLock *newPartitionLock; /* buffer partition lock for it */
2110 : int existing_buf_id;
2111 : Buffer victim_buffer;
2112 : BufferDesc *victim_buf_hdr;
2113 : uint64 victim_buf_state;
2114 63170624 : uint64 set_bits = 0;
2115 :
2116 : /* Make sure we will have room to remember the buffer pin */
2117 63170624 : ResourceOwnerEnlarge(CurrentResourceOwner);
2118 63170624 : ReservePrivateRefCountEntry();
2119 :
2120 : /* create a tag so we can lookup the buffer */
2121 63170624 : InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
2122 :
2123 : /* determine its hash code and partition lock ID */
2124 63170624 : newHash = BufTableHashCode(&newTag);
2125 63170624 : newPartitionLock = BufMappingPartitionLock(newHash);
2126 :
2127 : /* see if the block is in the buffer pool already */
     : /* a shared lock suffices here: we only look up and pin, the mapping isn't changed */
2128 63170624 : LWLockAcquire(newPartitionLock, LW_SHARED);
2129 63170624 : existing_buf_id = BufTableLookup(&newTag, newHash);
2130 63170624 : if (existing_buf_id >= 0)
2131 : {
2132 : BufferDesc *buf;
2133 : bool valid;
2134 :
2135 : /*
2136 : * Found it. Now, pin the buffer so no one can steal it from the
2137 : * buffer pool, and check to see if the correct data has been loaded
2138 : * into the buffer.
2139 : */
2140 61396446 : buf = GetBufferDescriptor(existing_buf_id);
2141 :
2142 61396446 : valid = PinBuffer(buf, strategy, false);
2143 :
2144 : /* Can release the mapping lock as soon as we've pinned it */
2145 61396446 : LWLockRelease(newPartitionLock);
2146 :
2147 61396446 : *foundPtr = true;
2148 :
2149 61396446 : if (!valid)
2150 : {
2151 : /*
2152 : * We can only get here if (a) someone else is still reading in
2153 : * the page, (b) a previous read attempt failed, or (c) someone
2154 : * called StartReadBuffers() but not yet WaitReadBuffers().
2155 : */
2156 2231 : *foundPtr = false;
2157 : }
2158 :
2159 61396446 : return buf;
2160 : }
2161 :
2162 : /*
2163 : * Didn't find it in the buffer pool. We'll have to initialize a new
2164 : * buffer. Remember to unlock the mapping lock while doing the work.
2165 : */
2166 1774178 : LWLockRelease(newPartitionLock);
2167 :
2168 : /*
2169 : * Acquire a victim buffer. Somebody else might try to do the same, we
2170 : * don't hold any conflicting locks. If so we'll have to undo our work
2171 : * later.
2172 : */
2173 1774178 : victim_buffer = GetVictimBuffer(strategy, io_context);
2174 1774178 : victim_buf_hdr = GetBufferDescriptor(victim_buffer - 1);
2175 :
2176 : /*
2177 : * Try to make a hashtable entry for the buffer under its new tag. If
2178 : * somebody else inserted another buffer for the tag, we'll release the
2179 : * victim buffer we acquired and use the already inserted one.
2180 : */
2181 1774178 : LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
2182 1774178 : existing_buf_id = BufTableInsert(&newTag, newHash, victim_buf_hdr->buf_id);
2183 1774178 : if (existing_buf_id >= 0)
2184 : {
2185 : BufferDesc *existing_buf_hdr;
2186 : bool valid;
2187 :
2188 : /*
2189 : * Got a collision. Someone has already done what we were about to do.
2190 : * We'll just handle this as if it were found in the buffer pool in
2191 : * the first place. First, give up the buffer we were planning to
2192 : * use.
2193 : *
2194 : * We could do this after releasing the partition lock, but then we'd
2195 : * have to call ResourceOwnerEnlarge() & ReservePrivateRefCountEntry()
2196 : * before acquiring the lock, for the rare case of such a collision.
2197 : */
2198 602 : UnpinBuffer(victim_buf_hdr);
2199 :
2200 : /* remaining code should match code at top of routine */
2201 :
2202 602 : existing_buf_hdr = GetBufferDescriptor(existing_buf_id);
2203 :
2204 602 : valid = PinBuffer(existing_buf_hdr, strategy, false);
2205 :
2206 : /* Can release the mapping lock as soon as we've pinned it */
2207 602 : LWLockRelease(newPartitionLock);
2208 :
2209 602 : *foundPtr = true;
2210 :
2211 602 : if (!valid)
2212 : {
2213 : /*
2214 : * We can only get here if (a) someone else is still reading in
2215 : * the page, (b) a previous read attempt failed, or (c) someone
2216 : * called StartReadBuffers() but not yet WaitReadBuffers().
2217 : */
2218 387 : *foundPtr = false;
2219 : }
2220 :
2221 602 : return existing_buf_hdr;
2222 : }
2223 :
2224 : /*
2225 : * Need to lock the buffer header too in order to change its tag.
2226 : */
2227 1773576 : victim_buf_state = LockBufHdr(victim_buf_hdr);
2228 :
2229 : /* some sanity checks while we hold the buffer header lock */
2230 : Assert(BUF_STATE_GET_REFCOUNT(victim_buf_state) == 1);
2231 : Assert(!(victim_buf_state & (BM_TAG_VALID | BM_VALID | BM_DIRTY | BM_IO_IN_PROGRESS)));
2232 :
2233 1773576 : victim_buf_hdr->tag = newTag;
2234 :
2235 : /*
2236 : * Make sure BM_PERMANENT is set for buffers that must be written at every
2237 : * checkpoint. Unlogged buffers only need to be written at shutdown
2238 : * checkpoints, except for their "init" forks, which need to be treated
2239 : * just like permanent relations.
     :  *
     :  * BUF_USAGECOUNT_ONE also gives the new entry an initial usage count of 1.
2240 : */
2241 1773576 : set_bits |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
2242 1773576 : if (relpersistence == RELPERSISTENCE_PERMANENT || forkNum == INIT_FORKNUM)
2243 1773220 : set_bits |= BM_PERMANENT;
2244 :
2245 1773576 : UnlockBufHdrExt(victim_buf_hdr, victim_buf_state,
2246 : set_bits, 0, 0);
2247 :
2248 1773576 : LWLockRelease(newPartitionLock);
2249 :
2250 : /*
2251 : * Buffer contents are currently invalid.
2252 : */
2253 1773576 : *foundPtr = false;
2254 :
2255 1773576 : return victim_buf_hdr;
2256 : }
2257 :
2258 : /*
2259 : * InvalidateBuffer -- mark a shared buffer invalid.
2260 : *
2261 : * The buffer header spinlock must be held at entry. We drop it before
2262 : * returning. (This is sane because the caller must have locked the
2263 : * buffer in order to be sure it should be dropped.)
2264 : *
2265 : * This is used only in contexts such as dropping a relation. We assume
2266 : * that no other backend could possibly be interested in using the page,
2267 : * so the only reason the buffer might be pinned is if someone else is
2268 : * trying to write it out. We have to let them finish before we can
2269 : * reclaim the buffer.
2270 : *
2271 : * The buffer could get reclaimed by someone else while we are waiting
2272 : * to acquire the necessary locks; if so, don't mess it up.
2273 : */
2274 : static void
2275 107129 : InvalidateBuffer(BufferDesc *buf)
2276 : {
2277 : BufferTag oldTag;
2278 : uint32 oldHash; /* hash value for oldTag */
2279 : LWLock *oldPartitionLock; /* buffer partition lock for it */
2280 : uint32 oldFlags;
2281 : uint64 buf_state;
2282 :
2283 : /* Save the original buffer tag before dropping the spinlock */
2284 107129 : oldTag = buf->tag;
2285 :
2286 107129 : UnlockBufHdr(buf);
2287 :
2288 : /*
2289 : * Need to compute the old tag's hashcode and partition lock ID. XXX is it
2290 : * worth storing the hashcode in BufferDesc so we need not recompute it
2291 : * here? Probably not.
2292 : */
2293 107129 : oldHash = BufTableHashCode(&oldTag);
2294 107129 : oldPartitionLock = BufMappingPartitionLock(oldHash);
2295 :
     : /* we loop back here after waiting out a concurrent pinner (see below) */
2296 110307 : retry:
2297 :
2298 : /*
2299 : * Acquire exclusive mapping lock in preparation for changing the buffer's
2300 : * association.
2301 : */
2302 110307 : LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
2303 :
2304 : /* Re-lock the buffer header */
2305 110307 : buf_state = LockBufHdr(buf);
2306 :
2307 : /* If it's changed while we were waiting for lock, do nothing */
2308 110307 : if (!BufferTagsEqual(&buf->tag, &oldTag))
2309 : {
2310 7 : UnlockBufHdr(buf);
2311 7 : LWLockRelease(oldPartitionLock);
2312 7 : return;
2313 : }
2314 :
2315 : /*
2316 : * We assume the reason for it to be pinned is that either we were
2317 : * asynchronously reading the page in before erroring out or someone else
2318 : * is flushing the page out. Wait for the IO to finish. (This could be
2319 : * an infinite loop if the refcount is messed up... it would be nice to
2320 : * time out after awhile, but there seems no way to be sure how many loops
2321 : * may be needed. Note that if the other guy has pinned the buffer but
2322 : * not yet done StartBufferIO, WaitIO will fall through and we'll
2323 : * effectively be busy-looping here.)
2324 : */
2325 110300 : if (BUF_STATE_GET_REFCOUNT(buf_state) != 0)
2326 : {
2327 3178 : UnlockBufHdr(buf);
2328 3178 : LWLockRelease(oldPartitionLock);
2329 : /* safety check: should definitely not be our *own* pin */
2330 3178 : if (GetPrivateRefCount(BufferDescriptorGetBuffer(buf)) > 0)
2331 0 : elog(ERROR, "buffer is pinned in InvalidateBuffer");
2332 3178 : WaitIO(buf);
2333 3178 : goto retry;
2334 : }
2335 :
2336 : /*
2337 : * An invalidated buffer should not have any backends waiting to lock the
2338 : * buffer, therefore BM_LOCK_WAKE_IN_PROGRESS should not be set.
2339 : */
2340 : Assert(!(buf_state & BM_LOCK_WAKE_IN_PROGRESS));
2341 :
2342 : /*
2343 : * Clear out the buffer's tag and flags. We must do this to ensure that
2344 : * linear scans of the buffer array don't think the buffer is valid.
2345 : */
2346 107122 : oldFlags = buf_state & BUF_FLAG_MASK;
2347 107122 : ClearBufferTag(&buf->tag);
2348 :
2349 107122 : UnlockBufHdrExt(buf, buf_state,
2350 : 0,
2351 : BUF_FLAG_MASK | BUF_USAGECOUNT_MASK,
2352 : 0);
2353 :
2354 : /*
2355 : * Remove the buffer from the lookup hashtable, if it was in there.
2356 : */
2357 107122 : if (oldFlags & BM_TAG_VALID)
2358 107122 : BufTableDelete(&oldTag, oldHash);
2359 :
2360 : /*
2361 : * Done with mapping lock.
2362 : */
2363 107122 : LWLockRelease(oldPartitionLock);
2364 : }
2365 :
2366 : /*
2367 : * Helper routine for GetVictimBuffer()
2368 : *
2369 : * Needs to be called on a buffer with a valid tag, pinned, but without the
2370 : * buffer header spinlock held.
2371 : *
2372 : * Returns true if the buffer can be reused, in which case the buffer is only
2373 : * pinned by this backend and marked as invalid, false otherwise.
2374 : */
2375 : static bool
2376 1248251 : InvalidateVictimBuffer(BufferDesc *buf_hdr)
2377 : {
2378 : uint64 buf_state;
2379 : uint32 hash;
2380 : LWLock *partition_lock;
2381 : BufferTag tag;
2382 :
2383 : Assert(GetPrivateRefCount(BufferDescriptorGetBuffer(buf_hdr)) == 1);
2384 :
2385 : /* have buffer pinned, so it's safe to read tag without lock */
2386 1248251 : tag = buf_hdr->tag;
2387 :
2388 1248251 : hash = BufTableHashCode(&tag);
2389 1248251 : partition_lock = BufMappingPartitionLock(hash);
2390 :
2391 1248251 : LWLockAcquire(partition_lock, LW_EXCLUSIVE);
2392 :
2393 : /* lock the buffer header */
2394 1248251 : buf_state = LockBufHdr(buf_hdr);
2395 :
2396 : /*
2397 : * We have the buffer pinned nobody else should have been able to unset
2398 : * this concurrently.
2399 : */
2400 : Assert(buf_state & BM_TAG_VALID);
2401 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2402 : Assert(BufferTagsEqual(&buf_hdr->tag, &tag));
2403 :
2404 : /*
2405 : * If somebody else pinned the buffer since, or even worse, dirtied it,
2406 : * give up on this buffer: It's clearly in use.
2407 : */
2408 1248251 : if (BUF_STATE_GET_REFCOUNT(buf_state) != 1 || (buf_state & BM_DIRTY))
2409 : {
2410 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2411 :
2412 406 : UnlockBufHdr(buf_hdr);
2413 406 : LWLockRelease(partition_lock);
2414 :
2415 406 : return false;
2416 : }
2417 :
2418 : /*
2419 : * An invalidated buffer should not have any backends waiting to lock the
2420 : * buffer, therefore BM_LOCK_WAKE_IN_PROGRESS should not be set.
2421 : */
2422 : Assert(!(buf_state & BM_LOCK_WAKE_IN_PROGRESS));
2423 :
2424 : /*
2425 : * Clear out the buffer's tag and flags and usagecount. This is not
2426 : * strictly required, as BM_TAG_VALID/BM_VALID needs to be checked before
2427 : * doing anything with the buffer. But currently it's beneficial, as the
2428 : * cheaper pre-check for several linear scans of shared buffers use the
2429 : * tag (see e.g. FlushDatabaseBuffers()).
2430 : */
2431 1247845 : ClearBufferTag(&buf_hdr->tag);
2432 1247845 : UnlockBufHdrExt(buf_hdr, buf_state,
2433 : 0,
2434 : BUF_FLAG_MASK | BUF_USAGECOUNT_MASK,
2435 : 0);
2436 :
2437 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2438 :
2439 : /* finally delete buffer from the buffer mapping table */
2440 1247845 : BufTableDelete(&tag, hash);
2441 :
2442 1247845 : LWLockRelease(partition_lock);
2443 :
     : /* re-read the unlocked state; only consumed by the sanity Asserts below */
2444 1247845 : buf_state = pg_atomic_read_u64(&buf_hdr->state);
2445 : Assert(!(buf_state & (BM_DIRTY | BM_VALID | BM_TAG_VALID)));
2446 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2447 : Assert(BUF_STATE_GET_REFCOUNT(pg_atomic_read_u64(&buf_hdr->state)) > 0);
2448 :
2449 1247845 : return true;
2450 : }
2451 :
2452 : static Buffer
2453 2000502 : GetVictimBuffer(BufferAccessStrategy strategy, IOContext io_context)
2454 : {
2455 : BufferDesc *buf_hdr;
2456 : Buffer buf;
2457 : uint64 buf_state;
2458 : bool from_ring;
2459 :
2460 : /*
2461 : * Ensure, before we pin a victim buffer, that there's a free refcount
2462 : * entry and resource owner slot for the pin.
2463 : */
2464 2000502 : ReservePrivateRefCountEntry();
2465 2000502 : ResourceOwnerEnlarge(CurrentResourceOwner);
2466 :
2467 : /* we return here if a prospective victim buffer gets used concurrently */
2468 5923 : again:
2469 :
2470 : /*
2471 : * Select a victim buffer. The buffer is returned pinned and owned by
2472 : * this backend.
2473 : */
2474 2006425 : buf_hdr = StrategyGetBuffer(strategy, &buf_state, &from_ring);
2475 2006425 : buf = BufferDescriptorGetBuffer(buf_hdr);
2476 :
2477 : /*
2478 : * We shouldn't have any other pins for this buffer.
2479 : */
2480 2006425 : CheckBufferIsPinnedOnce(buf);
2481 :
2482 : /*
2483 : * If the buffer was dirty, try to write it out. There is a race
2484 : * condition here, another backend could dirty the buffer between
2485 : * StrategyGetBuffer() checking that it is not in use and invalidating the
2486 : * buffer below. That's addressed by InvalidateVictimBuffer() verifying
2487 : * that the buffer is not dirty.
2488 : */
2489 2006425 : if (buf_state & BM_DIRTY)
2490 : {
2491 : Assert(buf_state & BM_TAG_VALID);
2492 : Assert(buf_state & BM_VALID);
2493 :
2494 : /*
2495 : * We need a share-exclusive lock on the buffer contents to write it
2496 : * out (else we might write invalid data, eg because someone else is
2497 : * compacting the page contents while we write). We must use a
2498 : * conditional lock acquisition here to avoid deadlock. Even though
2499 : * the buffer was not pinned (and therefore surely not locked) when
2500 : * StrategyGetBuffer returned it, someone else could have pinned and
2501 : * (share-)exclusive-locked it by the time we get here. If we try to
2502 : * get the lock unconditionally, we'd block waiting for them; if they
2503 : * later block waiting for us, deadlock ensues. (This has been
2504 : * observed to happen when two backends are both trying to split btree
2505 : * index pages, and the second one just happens to be trying to split
2506 : * the page the first one got from StrategyGetBuffer.)
2507 : */
2508 278604 : if (!BufferLockConditional(buf, buf_hdr, BUFFER_LOCK_SHARE_EXCLUSIVE))
2509 : {
2510 : /*
2511 : * Someone else has locked the buffer, so give it up and loop back
2512 : * to get another one.
2513 : */
2514 0 : UnpinBuffer(buf_hdr);
2515 0 : goto again;
2516 : }
2517 :
2518 : /*
2519 : * If using a nondefault strategy, and writing the buffer would
2520 : * require a WAL flush, let the strategy decide whether to go ahead
2521 : * and write/reuse the buffer or to choose another victim. We need to
2522 : * hold the content lock in at least share-exclusive mode to safely
2523 : * inspect the page LSN, so this couldn't have been done inside
2524 : * StrategyGetBuffer.
2525 : */
2526 278604 : if (strategy != NULL)
2527 : {
2528 78528 : XLogRecPtr lsn = BufferGetLSN(buf_hdr);
2529 :
2530 78528 : if (XLogNeedsFlush(lsn)
2531 8814 : && StrategyRejectBuffer(strategy, buf_hdr, from_ring))
2532 : {
2533 5517 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
2534 5517 : UnpinBuffer(buf_hdr);
2535 5517 : goto again;
2536 : }
2537 : }
2538 :
2539 : /* OK, do the I/O */
2540 273087 : FlushBuffer(buf_hdr, NULL, IOOBJECT_RELATION, io_context);
2541 273087 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
2542 :
2543 273087 : ScheduleBufferTagForWriteback(&BackendWritebackContext, io_context,
2544 : &buf_hdr->tag);
2545 : }
2546 :
2547 :
2548 2000908 : if (buf_state & BM_VALID)
2549 : {
2550 : /*
2551 : * When a BufferAccessStrategy is in use, blocks evicted from shared
2552 : * buffers are counted as IOOP_EVICT in the corresponding context
2553 : * (e.g. IOCONTEXT_BULKWRITE). Shared buffers are evicted by a
2554 : * strategy in two cases: 1) while initially claiming buffers for the
2555 : * strategy ring 2) to replace an existing strategy ring buffer
2556 : * because it is pinned or in use and cannot be reused.
2557 : *
2558 : * Blocks evicted from buffers already in the strategy ring are
2559 : * counted as IOOP_REUSE in the corresponding strategy context.
2560 : *
2561 : * At this point, we can accurately count evictions and reuses,
2562 : * because we have successfully claimed the valid buffer. Previously,
2563 : * we may have been forced to release the buffer due to concurrent
2564 : * pinners or erroring out.
2565 : */
2566 1246122 : pgstat_count_io_op(IOOBJECT_RELATION, io_context,
2567 1246122 : from_ring ? IOOP_REUSE : IOOP_EVICT, 1, 0);
2568 : }
2569 :
2570 : /*
2571 : * If the buffer has an entry in the buffer mapping table, delete it. This
2572 : * can fail because another backend could have pinned or dirtied the
2573 : * buffer.
2574 : */
2575 2000908 : if ((buf_state & BM_TAG_VALID) && !InvalidateVictimBuffer(buf_hdr))
2576 : {
2577 406 : UnpinBuffer(buf_hdr);
2578 406 : goto again;
2579 : }
2580 :
2581 : /* a final set of sanity checks */
2582 : #ifdef USE_ASSERT_CHECKING
2583 : buf_state = pg_atomic_read_u64(&buf_hdr->state);
2584 :
2585 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 1);
2586 : Assert(!(buf_state & (BM_TAG_VALID | BM_VALID | BM_DIRTY)));
2587 :
2588 : CheckBufferIsPinnedOnce(buf);
2589 : #endif
2590 :
2591 2000502 : return buf;
2592 : }
2593 :
/*
 * Return the maximum number of buffers that a backend should try to pin once,
 * to avoid exceeding its fair share. This is the highest value that
 * GetAdditionalPinLimit() could ever return. Note that it may be zero on a
 * system with a very small buffer pool relative to max_connections.
 */
uint32
GetPinLimit(void)
{
	/* MaxProportionalPins holds this backend's fair-share pin limit. */
	return MaxProportionalPins;
}
2605 :
2606 : /*
2607 : * Return the maximum number of additional buffers that this backend should
2608 : * pin if it wants to stay under the per-backend limit, considering the number
2609 : * of buffers it has already pinned. Unlike LimitAdditionalPins(), the limit
2610 : * return by this function can be zero.
2611 : */
2612 : uint32
2613 3988401 : GetAdditionalPinLimit(void)
2614 : {
2615 : uint32 estimated_pins_held;
2616 :
2617 : /*
2618 : * We get the number of "overflowed" pins for free, but don't know the
2619 : * number of pins in PrivateRefCountArray. The cost of calculating that
2620 : * exactly doesn't seem worth it, so just assume the max.
2621 : */
2622 3988401 : estimated_pins_held = PrivateRefCountOverflowed + REFCOUNT_ARRAY_ENTRIES;
2623 :
2624 : /* Is this backend already holding more than its fair share? */
2625 3988401 : if (estimated_pins_held > MaxProportionalPins)
2626 1310117 : return 0;
2627 :
2628 2678284 : return MaxProportionalPins - estimated_pins_held;
2629 : }
2630 :
2631 : /*
2632 : * Limit the number of pins a batch operation may additionally acquire, to
2633 : * avoid running out of pinnable buffers.
2634 : *
2635 : * One additional pin is always allowed, on the assumption that the operation
2636 : * requires at least one to make progress.
2637 : */
2638 : void
2639 205029 : LimitAdditionalPins(uint32 *additional_pins)
2640 : {
2641 : uint32 limit;
2642 :
2643 205029 : if (*additional_pins <= 1)
2644 195014 : return;
2645 :
2646 10015 : limit = GetAdditionalPinLimit();
2647 10015 : limit = Max(limit, 1);
2648 10015 : if (limit < *additional_pins)
2649 5511 : *additional_pins = limit;
2650 : }
2651 :
2652 : /*
2653 : * Logic shared between ExtendBufferedRelBy(), ExtendBufferedRelTo(). Just to
2654 : * avoid duplicating the tracing and relpersistence related logic.
2655 : */
2656 : static BlockNumber
2657 216542 : ExtendBufferedRelCommon(BufferManagerRelation bmr,
2658 : ForkNumber fork,
2659 : BufferAccessStrategy strategy,
2660 : uint32 flags,
2661 : uint32 extend_by,
2662 : BlockNumber extend_upto,
2663 : Buffer *buffers,
2664 : uint32 *extended_by)
2665 : {
2666 : BlockNumber first_block;
2667 :
2668 : TRACE_POSTGRESQL_BUFFER_EXTEND_START(fork,
2669 : BMR_GET_SMGR(bmr)->smgr_rlocator.locator.spcOid,
2670 : BMR_GET_SMGR(bmr)->smgr_rlocator.locator.dbOid,
2671 : BMR_GET_SMGR(bmr)->smgr_rlocator.locator.relNumber,
2672 : BMR_GET_SMGR(bmr)->smgr_rlocator.backend,
2673 : extend_by);
2674 :
2675 216542 : if (bmr.relpersistence == RELPERSISTENCE_TEMP)
2676 11513 : first_block = ExtendBufferedRelLocal(bmr, fork, flags,
2677 : extend_by, extend_upto,
2678 : buffers, &extend_by);
2679 : else
2680 205029 : first_block = ExtendBufferedRelShared(bmr, fork, strategy, flags,
2681 : extend_by, extend_upto,
2682 : buffers, &extend_by);
2683 216542 : *extended_by = extend_by;
2684 :
2685 : TRACE_POSTGRESQL_BUFFER_EXTEND_DONE(fork,
2686 : BMR_GET_SMGR(bmr)->smgr_rlocator.locator.spcOid,
2687 : BMR_GET_SMGR(bmr)->smgr_rlocator.locator.dbOid,
2688 : BMR_GET_SMGR(bmr)->smgr_rlocator.locator.relNumber,
2689 : BMR_GET_SMGR(bmr)->smgr_rlocator.backend,
2690 : *extended_by,
2691 : first_block);
2692 :
2693 216542 : return first_block;
2694 : }
2695 :
/*
 * Implementation of ExtendBufferedRelBy() and ExtendBufferedRelTo() for
 * shared buffers.
 *
 * Pins and zero-fills up to extend_by victim buffers (the count may be
 * reduced by LimitAdditionalPins() and, if extend_upto is valid, by a
 * concurrent extension), enters them into the buffer mapping table, extends
 * the relation on disk with smgrzeroextend(), and finally marks the new
 * buffers BM_VALID.  Returns the pre-extension relation size, i.e. the
 * block number of the first newly-added block; the number of blocks
 * actually added is returned via *extended_by.
 */
static BlockNumber
ExtendBufferedRelShared(BufferManagerRelation bmr,
						ForkNumber fork,
						BufferAccessStrategy strategy,
						uint32 flags,
						uint32 extend_by,
						BlockNumber extend_upto,
						Buffer *buffers,
						uint32 *extended_by)
{
	BlockNumber first_block;
	IOContext	io_context = IOContextForStrategy(strategy);
	instr_time	io_start;

	/* stay within this backend's fair share of pinnable buffers */
	LimitAdditionalPins(&extend_by);

	/*
	 * Acquire victim buffers for extension without holding extension lock.
	 * Writing out victim buffers is the most expensive part of extending the
	 * relation, particularly when doing so requires WAL flushes. Zeroing out
	 * the buffers is also quite expensive, so do that before holding the
	 * extension lock as well.
	 *
	 * These pages are pinned by us and not valid. While we hold the pin they
	 * can't be acquired as victim buffers by another backend.
	 */
	for (uint32 i = 0; i < extend_by; i++)
	{
		Block		buf_block;

		buffers[i] = GetVictimBuffer(strategy, io_context);
		buf_block = BufHdrGetBlock(GetBufferDescriptor(buffers[i] - 1));

		/* new buffers are zero-filled */
		MemSet(buf_block, 0, BLCKSZ);
	}

	/*
	 * Lock relation against concurrent extensions, unless requested not to.
	 *
	 * We use the same extension lock for all forks. That's unnecessarily
	 * restrictive, but currently extensions for forks don't happen often
	 * enough to make it worth locking more granularly.
	 *
	 * Note that another backend might have extended the relation by the time
	 * we get the lock.
	 */
	if (!(flags & EB_SKIP_EXTENSION_LOCK))
		LockRelationForExtension(bmr.rel, ExclusiveLock);

	/*
	 * If requested, invalidate size cache, so that smgrnblocks asks the
	 * kernel.
	 */
	if (flags & EB_CLEAR_SIZE_CACHE)
		BMR_GET_SMGR(bmr)->smgr_cached_nblocks[fork] = InvalidBlockNumber;

	first_block = smgrnblocks(BMR_GET_SMGR(bmr), fork);

	/*
	 * Now that we have the accurate relation size, check if the caller wants
	 * us to extend to only up to a specific size. If there were concurrent
	 * extensions, we might have acquired too many buffers and need to release
	 * them.
	 */
	if (extend_upto != InvalidBlockNumber)
	{
		uint32		orig_extend_by = extend_by;

		if (first_block > extend_upto)
			extend_by = 0;
		else if ((uint64) first_block + extend_by > extend_upto)
			extend_by = extend_upto - first_block;

		/* release the victim buffers we over-acquired */
		for (uint32 i = extend_by; i < orig_extend_by; i++)
		{
			BufferDesc *buf_hdr = GetBufferDescriptor(buffers[i] - 1);

			UnpinBuffer(buf_hdr);
		}

		if (extend_by == 0)
		{
			if (!(flags & EB_SKIP_EXTENSION_LOCK))
				UnlockRelationForExtension(bmr.rel, ExclusiveLock);
			*extended_by = extend_by;
			return first_block;
		}
	}

	/* Fail if relation is already at maximum possible length */
	if ((uint64) first_block + extend_by >= MaxBlockNumber)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("cannot extend relation %s beyond %u blocks",
						relpath(BMR_GET_SMGR(bmr)->smgr_rlocator, fork).str,
						MaxBlockNumber)));

	/*
	 * Insert buffers into buffer table, mark as IO_IN_PROGRESS.
	 *
	 * This needs to happen before we extend the relation, because as soon as
	 * we do, other backends can start to read in those pages.
	 */
	for (uint32 i = 0; i < extend_by; i++)
	{
		Buffer		victim_buf = buffers[i];
		BufferDesc *victim_buf_hdr = GetBufferDescriptor(victim_buf - 1);
		BufferTag	tag;
		uint32		hash;
		LWLock	   *partition_lock;
		int			existing_id;

		/* in case we need to pin an existing buffer below */
		ResourceOwnerEnlarge(CurrentResourceOwner);
		ReservePrivateRefCountEntry();

		InitBufferTag(&tag, &BMR_GET_SMGR(bmr)->smgr_rlocator.locator, fork,
					  first_block + i);
		hash = BufTableHashCode(&tag);
		partition_lock = BufMappingPartitionLock(hash);

		LWLockAcquire(partition_lock, LW_EXCLUSIVE);

		existing_id = BufTableInsert(&tag, hash, victim_buf_hdr->buf_id);

		/*
		 * We get here only in the corner case where we are trying to extend
		 * the relation but we found a pre-existing buffer. This can happen
		 * because a prior attempt at extending the relation failed, and
		 * because mdread doesn't complain about reads beyond EOF (when
		 * zero_damaged_pages is ON) and so a previous attempt to read a block
		 * beyond EOF could have left a "valid" zero-filled buffer.
		 *
		 * This has also been observed when relation was overwritten by
		 * external process. Since the legitimate cases should always have
		 * left a zero-filled buffer, complain if not PageIsNew.
		 */
		if (existing_id >= 0)
		{
			BufferDesc *existing_hdr = GetBufferDescriptor(existing_id);
			Block		buf_block;
			bool		valid;

			/*
			 * Pin the existing buffer before releasing the partition lock,
			 * preventing it from being evicted.
			 */
			valid = PinBuffer(existing_hdr, strategy, false);

			LWLockRelease(partition_lock);
			UnpinBuffer(victim_buf_hdr);

			buffers[i] = BufferDescriptorGetBuffer(existing_hdr);
			buf_block = BufHdrGetBlock(existing_hdr);

			if (valid && !PageIsNew((Page) buf_block))
				ereport(ERROR,
						(errmsg("unexpected data beyond EOF in block %u of relation \"%s\"",
								existing_hdr->tag.blockNum,
								relpath(BMR_GET_SMGR(bmr)->smgr_rlocator, fork).str)));

			/*
			 * We *must* do smgr[zero]extend before succeeding, else the page
			 * will not be reserved by the kernel, and the next P_NEW call
			 * will decide to return the same page. Clear the BM_VALID bit,
			 * do StartBufferIO() and proceed.
			 *
			 * Loop to handle the very small possibility that someone re-sets
			 * BM_VALID between our clearing it and StartBufferIO inspecting
			 * it.
			 */
			do
			{
				pg_atomic_fetch_and_u64(&existing_hdr->state, ~BM_VALID);
			} while (!StartBufferIO(existing_hdr, true, false));
		}
		else
		{
			uint64		buf_state;
			uint64		set_bits = 0;

			buf_state = LockBufHdr(victim_buf_hdr);

			/* some sanity checks while we hold the buffer header lock */
			Assert(!(buf_state & (BM_VALID | BM_TAG_VALID | BM_DIRTY | BM_JUST_DIRTIED)));
			Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 1);

			victim_buf_hdr->tag = tag;

			set_bits |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
			if (bmr.relpersistence == RELPERSISTENCE_PERMANENT || fork == INIT_FORKNUM)
				set_bits |= BM_PERMANENT;

			UnlockBufHdrExt(victim_buf_hdr, buf_state,
							set_bits, 0,
							0);

			LWLockRelease(partition_lock);

			/* XXX: could combine the locked operations in it with the above */
			StartBufferIO(victim_buf_hdr, true, false);
		}
	}

	io_start = pgstat_prepare_io_time(track_io_timing);

	/*
	 * Note: if smgrzeroextend fails, we will end up with buffers that are
	 * allocated but not marked BM_VALID. The next relation extension will
	 * still select the same block number (because the relation didn't get any
	 * longer on disk) and so future attempts to extend the relation will find
	 * the same buffers (if they have not been recycled) but come right back
	 * here to try smgrzeroextend again.
	 *
	 * We don't need to set checksum for all-zero pages.
	 */
	smgrzeroextend(BMR_GET_SMGR(bmr), fork, first_block, extend_by, false);

	/*
	 * Release the file-extension lock; it's now OK for someone else to extend
	 * the relation some more.
	 *
	 * We remove IO_IN_PROGRESS after this, as waking up waiting backends can
	 * take noticeable time.
	 */
	if (!(flags & EB_SKIP_EXTENSION_LOCK))
		UnlockRelationForExtension(bmr.rel, ExclusiveLock);

	pgstat_count_io_op_time(IOOBJECT_RELATION, io_context, IOOP_EXTEND,
							io_start, 1, extend_by * BLCKSZ);

	/* Set BM_VALID, terminate IO, and wake up any waiters */
	for (uint32 i = 0; i < extend_by; i++)
	{
		Buffer		buf = buffers[i];
		BufferDesc *buf_hdr = GetBufferDescriptor(buf - 1);
		bool		lock = false;

		/* Did the caller ask for this particular block to come back locked? */
		if (flags & EB_LOCK_FIRST && i == 0)
			lock = true;
		else if (flags & EB_LOCK_TARGET)
		{
			Assert(extend_upto != InvalidBlockNumber);
			if (first_block + i + 1 == extend_upto)
				lock = true;
		}

		if (lock)
			LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

		TerminateBufferIO(buf_hdr, false, BM_VALID, true, false);
	}

	pgBufferUsage.shared_blks_written += extend_by;

	*extended_by = extend_by;

	return first_block;
}
2960 :
2961 : /*
2962 : * BufferIsLockedByMe
2963 : *
2964 : * Checks if this backend has the buffer locked in any mode.
2965 : *
2966 : * Buffer must be pinned.
2967 : */
2968 : bool
2969 0 : BufferIsLockedByMe(Buffer buffer)
2970 : {
2971 : BufferDesc *bufHdr;
2972 :
2973 : Assert(BufferIsPinned(buffer));
2974 :
2975 0 : if (BufferIsLocal(buffer))
2976 : {
2977 : /* Content locks are not maintained for local buffers. */
2978 0 : return true;
2979 : }
2980 : else
2981 : {
2982 0 : bufHdr = GetBufferDescriptor(buffer - 1);
2983 0 : return BufferLockHeldByMe(bufHdr);
2984 : }
2985 : }
2986 :
2987 : /*
2988 : * BufferIsLockedByMeInMode
2989 : *
2990 : * Checks if this backend has the buffer locked in the specified mode.
2991 : *
2992 : * Buffer must be pinned.
2993 : */
2994 : bool
2995 0 : BufferIsLockedByMeInMode(Buffer buffer, BufferLockMode mode)
2996 : {
2997 : BufferDesc *bufHdr;
2998 :
2999 : Assert(BufferIsPinned(buffer));
3000 :
3001 0 : if (BufferIsLocal(buffer))
3002 : {
3003 : /* Content locks are not maintained for local buffers. */
3004 0 : return true;
3005 : }
3006 : else
3007 : {
3008 0 : bufHdr = GetBufferDescriptor(buffer - 1);
3009 0 : return BufferLockHeldByMeInMode(bufHdr, mode);
3010 : }
3011 : }
3012 :
3013 : /*
3014 : * BufferIsDirty
3015 : *
3016 : * Checks if buffer is already dirty.
3017 : *
3018 : * Buffer must be pinned and [share-]exclusive-locked. (Without such a lock,
3019 : * the result may be stale before it's returned.)
3020 : */
3021 : bool
3022 0 : BufferIsDirty(Buffer buffer)
3023 : {
3024 : BufferDesc *bufHdr;
3025 :
3026 : Assert(BufferIsPinned(buffer));
3027 :
3028 0 : if (BufferIsLocal(buffer))
3029 : {
3030 0 : int bufid = -buffer - 1;
3031 :
3032 0 : bufHdr = GetLocalBufferDescriptor(bufid);
3033 : /* Content locks are not maintained for local buffers. */
3034 : }
3035 : else
3036 : {
3037 0 : bufHdr = GetBufferDescriptor(buffer - 1);
3038 : Assert(BufferIsLockedByMeInMode(buffer, BUFFER_LOCK_SHARE_EXCLUSIVE) ||
3039 : BufferIsLockedByMeInMode(buffer, BUFFER_LOCK_EXCLUSIVE));
3040 : }
3041 :
3042 0 : return pg_atomic_read_u64(&bufHdr->state) & BM_DIRTY;
3043 : }
3044 :
/*
 * MarkBufferDirty
 *
 * Marks buffer contents as dirty (actual write happens later).
 *
 * Buffer must be pinned and exclusive-locked. (If caller does not hold
 * exclusive lock, then somebody could be in process of writing the buffer,
 * leading to risk of bad data written to disk.)
 */
void
MarkBufferDirty(Buffer buffer)
{
	BufferDesc *bufHdr;
	uint64		buf_state;
	uint64		old_buf_state;

	if (!BufferIsValid(buffer))
		elog(ERROR, "bad buffer ID: %d", buffer);

	/* local buffers have their own, simpler, dirty-marking path */
	if (BufferIsLocal(buffer))
	{
		MarkLocalBufferDirty(buffer);
		return;
	}

	bufHdr = GetBufferDescriptor(buffer - 1);

	Assert(BufferIsPinned(buffer));
	Assert(BufferIsLockedByMeInMode(buffer, BUFFER_LOCK_EXCLUSIVE));

	/*
	 * NB: We have to wait for the buffer header spinlock to be not held, as
	 * TerminateBufferIO() relies on the spinlock.
	 */
	old_buf_state = pg_atomic_read_u64(&bufHdr->state);
	for (;;)
	{
		if (old_buf_state & BM_LOCKED)
			old_buf_state = WaitBufHdrUnlocked(bufHdr);

		buf_state = old_buf_state;

		Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
		buf_state |= BM_DIRTY | BM_JUST_DIRTIED;

		/*
		 * CAS loop: on failure old_buf_state has been reloaded with the
		 * current state, so just retry with the refreshed value.
		 */
		if (pg_atomic_compare_exchange_u64(&bufHdr->state, &old_buf_state,
										   buf_state))
			break;
	}

	/*
	 * If the buffer was not dirty already, do vacuum accounting.
	 */
	if (!(old_buf_state & BM_DIRTY))
	{
		pgBufferUsage.shared_blks_dirtied++;
		if (VacuumCostActive)
			VacuumCostBalance += VacuumCostPageDirty;
	}
}
3105 :
3106 : /*
3107 : * ReleaseAndReadBuffer -- combine ReleaseBuffer() and ReadBuffer()
3108 : *
3109 : * Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
3110 : * compared to calling the two routines separately. Now it's mainly just
3111 : * a convenience function. However, if the passed buffer is valid and
3112 : * already contains the desired block, we just return it as-is; and that
3113 : * does save considerable work compared to a full release and reacquire.
3114 : *
3115 : * Note: it is OK to pass buffer == InvalidBuffer, indicating that no old
3116 : * buffer actually needs to be released. This case is the same as ReadBuffer,
3117 : * but can save some tests in the caller.
3118 : */
3119 : Buffer
3120 30174643 : ReleaseAndReadBuffer(Buffer buffer,
3121 : Relation relation,
3122 : BlockNumber blockNum)
3123 : {
3124 30174643 : ForkNumber forkNum = MAIN_FORKNUM;
3125 : BufferDesc *bufHdr;
3126 :
3127 30174643 : if (BufferIsValid(buffer))
3128 : {
3129 : Assert(BufferIsPinned(buffer));
3130 18200545 : if (BufferIsLocal(buffer))
3131 : {
3132 37251 : bufHdr = GetLocalBufferDescriptor(-buffer - 1);
3133 41070 : if (bufHdr->tag.blockNum == blockNum &&
3134 7638 : BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
3135 3819 : BufTagGetForkNum(&bufHdr->tag) == forkNum)
3136 3819 : return buffer;
3137 33432 : UnpinLocalBuffer(buffer);
3138 : }
3139 : else
3140 : {
3141 18163294 : bufHdr = GetBufferDescriptor(buffer - 1);
3142 : /* we have pin, so it's ok to examine tag without spinlock */
3143 24286422 : if (bufHdr->tag.blockNum == blockNum &&
3144 12246256 : BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
3145 6123128 : BufTagGetForkNum(&bufHdr->tag) == forkNum)
3146 6123128 : return buffer;
3147 12040166 : UnpinBuffer(bufHdr);
3148 : }
3149 : }
3150 :
3151 24047696 : return ReadBuffer(relation, blockNum);
3152 : }
3153 :
/*
 * PinBuffer -- make buffer unavailable for replacement.
 *
 * For the default access strategy, the buffer's usage_count is incremented
 * when we first pin it; for other strategies we just make sure the usage_count
 * isn't zero. (The idea of the latter is that we don't want synchronized
 * heap scans to inflate the count, but we need it to not be zero to discourage
 * other backends from stealing buffers from our ring. As long as we cycle
 * through the ring faster than the global clock-sweep cycles, buffers in
 * our ring won't be chosen as victims for replacement by other backends.)
 *
 * This should be applied only to shared buffers, never local ones.
 *
 * Since buffers are pinned/unpinned very frequently, pin buffers without
 * taking the buffer header lock; instead update the state variable in loop of
 * CAS operations. Hopefully it's just a single CAS.
 *
 * Note that ResourceOwnerEnlarge() and ReservePrivateRefCountEntry()
 * must have been done already.
 *
 * Returns true if buffer is BM_VALID, else false. This provision allows
 * some callers to avoid an extra spinlock cycle. If skip_if_not_valid is
 * true, then a false return value also indicates that the buffer was
 * (recently) invalid and has not been pinned.
 */
static bool
PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy,
		  bool skip_if_not_valid)
{
	Buffer		b = BufferDescriptorGetBuffer(buf);
	bool		result;
	PrivateRefCountEntry *ref;

	Assert(!BufferIsLocal(b));
	Assert(ReservedRefCountSlot != -1);

	/* do we already hold a pin on this buffer in this backend? */
	ref = GetPrivateRefCountEntry(b, true);

	if (ref == NULL)
	{
		/* first pin by this backend: take a shared refcount via CAS loop */
		uint64		buf_state;
		uint64		old_buf_state;

		old_buf_state = pg_atomic_read_u64(&buf->state);
		for (;;)
		{
			if (unlikely(skip_if_not_valid && !(old_buf_state & BM_VALID)))
				return false;

			/*
			 * We're not allowed to increase the refcount while the buffer
			 * header spinlock is held. Wait for the lock to be released.
			 */
			if (old_buf_state & BM_LOCKED)
				old_buf_state = WaitBufHdrUnlocked(buf);

			buf_state = old_buf_state;

			/* increase refcount */
			buf_state += BUF_REFCOUNT_ONE;

			if (strategy == NULL)
			{
				/* Default case: increase usagecount unless already max. */
				if (BUF_STATE_GET_USAGECOUNT(buf_state) < BM_MAX_USAGE_COUNT)
					buf_state += BUF_USAGECOUNT_ONE;
			}
			else
			{
				/*
				 * Ring buffers shouldn't evict others from pool. Thus we
				 * don't make usagecount more than 1.
				 */
				if (BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
					buf_state += BUF_USAGECOUNT_ONE;
			}

			if (pg_atomic_compare_exchange_u64(&buf->state, &old_buf_state,
											   buf_state))
			{
				result = (buf_state & BM_VALID) != 0;

				/* record the new pin in backend-local bookkeeping */
				TrackNewBufferPin(b);
				break;
			}
			/* CAS failed: old_buf_state was refreshed, retry */
		}
	}
	else
	{
		/*
		 * If we previously pinned the buffer, it is likely to be valid, but
		 * it may not be if StartReadBuffers() was called and
		 * WaitReadBuffers() hasn't been called yet. We'll check by loading
		 * the flags without locking. This is racy, but it's OK to return
		 * false spuriously: when WaitReadBuffers() calls StartBufferIO(),
		 * it'll see that it's now valid.
		 *
		 * Note: We deliberately avoid a Valgrind client request here.
		 * Individual access methods can optionally superimpose buffer page
		 * client requests on top of our client requests to enforce that
		 * buffers are only accessed while locked (and pinned). It's possible
		 * that the buffer page is legitimately non-accessible here. We
		 * cannot meddle with that.
		 */
		result = (pg_atomic_read_u64(&buf->state) & BM_VALID) != 0;

		Assert(ref->data.refcount > 0);
		ref->data.refcount++;
		ResourceOwnerRememberBuffer(CurrentResourceOwner, b);
	}

	return result;
}
3267 :
/*
 * PinBuffer_Locked -- as above, but caller already locked the buffer header.
 * The spinlock is released before return.
 *
 * As this function is called with the spinlock held, the caller has to
 * previously call ReservePrivateRefCountEntry() and
 * ResourceOwnerEnlarge(CurrentResourceOwner);
 *
 * Currently, no callers of this function want to modify the buffer's
 * usage_count at all, so there's no need for a strategy parameter.
 * Also we don't bother with a BM_VALID test (the caller could check that for
 * itself).
 *
 * Also all callers only ever use this function when it's known that the
 * buffer can't have a preexisting pin by this backend. That allows us to skip
 * searching the private refcount array & hash, which is a boon, because the
 * spinlock is still held.
 *
 * Note: use of this routine is frequently mandatory, not just an optimization
 * to save a spin lock/unlock cycle, because we need to pin a buffer before
 * its state can change under us.
 */
static void
PinBuffer_Locked(BufferDesc *buf)
{
	uint64		old_buf_state;

	/*
	 * As explained, We don't expect any preexisting pins. That allows us to
	 * manipulate the PrivateRefCount after releasing the spinlock
	 */
	Assert(GetPrivateRefCountEntry(BufferDescriptorGetBuffer(buf), false) == NULL);

	/*
	 * Since we hold the buffer spinlock, we can update the buffer state and
	 * release the lock in one operation.
	 *
	 * NOTE(review): the trailing 1 appears to be a refcount delta applied
	 * together with the unlock (matching TrackNewBufferPin below) — confirm
	 * against UnlockBufHdrExt()'s definition.
	 */
	old_buf_state = pg_atomic_read_u64(&buf->state);

	UnlockBufHdrExt(buf, old_buf_state,
					0, 0, 1);

	TrackNewBufferPin(BufferDescriptorGetBuffer(buf));
}
3312 :
/*
 * Support for waking up another backend that is waiting for the cleanup lock
 * to be released using BM_PIN_COUNT_WAITER.
 *
 * See LockBufferForCleanup().
 *
 * Expected to be called just after releasing a buffer pin (in a BufferDesc,
 * not just reducing the backend-local pincount for the buffer).
 */
static void
WakePinCountWaiter(BufferDesc *buf)
{
	/*
	 * Acquire the buffer header lock, re-check that there's a waiter. Another
	 * backend could have unpinned this buffer, and already woken up the
	 * waiter.
	 *
	 * There's no danger of the buffer being replaced after we unpinned it
	 * above, as it's pinned by the waiter. The waiter removes
	 * BM_PIN_COUNT_WAITER if it stops waiting for a reason other than this
	 * backend waking it up.
	 */
	uint64		buf_state = LockBufHdr(buf);

	if ((buf_state & BM_PIN_COUNT_WAITER) &&
		BUF_STATE_GET_REFCOUNT(buf_state) == 1)
	{
		/* we just released the last pin other than the waiter's */
		int			wait_backend_pgprocno = buf->wait_backend_pgprocno;

		/* clear the waiter flag while unlocking, then signal the waiter */
		UnlockBufHdrExt(buf, buf_state,
						0, BM_PIN_COUNT_WAITER,
						0);
		ProcSendSignal(wait_backend_pgprocno);
	}
	else
		UnlockBufHdr(buf);
}
3351 :
3352 : /*
3353 : * UnpinBuffer -- make buffer available for replacement.
3354 : *
3355 : * This should be applied only to shared buffers, never local ones. This
3356 : * always adjusts CurrentResourceOwner.
3357 : */
3358 : static void
3359 75615127 : UnpinBuffer(BufferDesc *buf)
3360 : {
3361 75615127 : Buffer b = BufferDescriptorGetBuffer(buf);
3362 :
3363 75615127 : ResourceOwnerForgetBuffer(CurrentResourceOwner, b);
3364 75615127 : UnpinBufferNoOwner(buf);
3365 75615127 : }
3366 :
3367 : static void
3368 75619805 : UnpinBufferNoOwner(BufferDesc *buf)
3369 : {
3370 : PrivateRefCountEntry *ref;
3371 75619805 : Buffer b = BufferDescriptorGetBuffer(buf);
3372 :
3373 : Assert(!BufferIsLocal(b));
3374 :
3375 : /* not moving as we're likely deleting it soon anyway */
3376 75619805 : ref = GetPrivateRefCountEntry(b, false);
3377 : Assert(ref != NULL);
3378 : Assert(ref->data.refcount > 0);
3379 75619805 : ref->data.refcount--;
3380 75619805 : if (ref->data.refcount == 0)
3381 : {
3382 : uint64 old_buf_state;
3383 :
3384 : /*
3385 : * Mark buffer non-accessible to Valgrind.
3386 : *
3387 : * Note that the buffer may have already been marked non-accessible
3388 : * within access method code that enforces that buffers are only
3389 : * accessed while a buffer lock is held.
3390 : */
3391 : VALGRIND_MAKE_MEM_NOACCESS(BufHdrGetBlock(buf), BLCKSZ);
3392 :
3393 : /*
3394 : * I'd better not still hold the buffer content lock. Can't use
3395 : * BufferIsLockedByMe(), as that asserts the buffer is pinned.
3396 : */
3397 : Assert(!BufferLockHeldByMe(buf));
3398 :
3399 : /* decrement the shared reference count */
3400 61330216 : old_buf_state = pg_atomic_fetch_sub_u64(&buf->state, BUF_REFCOUNT_ONE);
3401 :
3402 : /* Support LockBufferForCleanup() */
3403 61330216 : if (old_buf_state & BM_PIN_COUNT_WAITER)
3404 89 : WakePinCountWaiter(buf);
3405 :
3406 61330216 : ForgetPrivateRefCountEntry(ref);
3407 : }
3408 75619805 : }
3409 :
/*
 * Set up backend-local tracking of a buffer pinned the first time by this
 * backend.
 *
 * The caller must already have incremented the buffer's shared refcount;
 * here we only create the private refcount entry and register the pin with
 * the current resource owner.
 */
inline void
TrackNewBufferPin(Buffer buf)
{
	PrivateRefCountEntry *ref;

	ref = NewPrivateRefCountEntry(buf);
	ref->data.refcount++;

	ResourceOwnerRememberBuffer(CurrentResourceOwner, buf);

	/*
	 * This is the first pin for this page by this backend, mark its page as
	 * defined to valgrind. While the page contents might not actually be
	 * valid yet, we don't currently guarantee that such pages are marked
	 * undefined or non-accessible.
	 *
	 * It's not necessarily the prettiest to do this here, but otherwise we'd
	 * need this block of code in multiple places.
	 */
	VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(GetBufferDescriptor(buf - 1)),
							  BLCKSZ);
}
3436 :
     : /*
     : * Instantiate sort_checkpoint_bufferids(), a static sort routine over
     : * CkptSortItem arrays, from the inline-sort template, ordered by
     : * ckpt_buforder_comparator().
     : */
3437 : #define ST_SORT sort_checkpoint_bufferids
3438 : #define ST_ELEMENT_TYPE CkptSortItem
3439 : #define ST_COMPARE(a, b) ckpt_buforder_comparator(a, b)
3440 : #define ST_SCOPE static
3441 : #define ST_DEFINE
3442 : #include "lib/sort_template.h"
3443 :
3444 : /*
3445 : * BufferSync -- Write out all dirty buffers in the pool.
3446 : *
3447 : * This is called at checkpoint time to write out all dirty shared buffers.
3448 : * The checkpoint request flags should be passed in. If CHECKPOINT_FAST is
3449 : * set, we disable delays between writes; if CHECKPOINT_IS_SHUTDOWN,
3450 : * CHECKPOINT_END_OF_RECOVERY or CHECKPOINT_FLUSH_UNLOGGED is set, we write
3451 : * even unlogged buffers, which are otherwise skipped. The remaining flags
3452 : * currently have no effect here.
3453 : */
3454 : static void
3455 1808 : BufferSync(int flags)
3456 : {
3457 : uint64 buf_state;
3458 : int buf_id;
3459 : int num_to_scan;
3460 : int num_spaces;
3461 : int num_processed;
3462 : int num_written;
3463 1808 : CkptTsStatus *per_ts_stat = NULL;
3464 : Oid last_tsid;
3465 : binaryheap *ts_heap;
3466 : int i;
3467 1808 : uint64 mask = BM_DIRTY;
3468 : WritebackContext wb_context;
3469 :
3470 : /*
3471 : * Unless this is a shutdown checkpoint or we have been explicitly told,
3472 : * we write only permanent, dirty buffers. But at shutdown or end of
3473 : * recovery, we write all dirty buffers.
3474 : */
3475 1808 : if (!((flags & (CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_END_OF_RECOVERY |
3476 : CHECKPOINT_FLUSH_UNLOGGED))))
3477 1003 : mask |= BM_PERMANENT;
3478 :
3479 : /*
3480 : * Loop over all buffers, and mark the ones that need to be written with
3481 : * BM_CHECKPOINT_NEEDED. Count them as we go (num_to_scan), so that we
3482 : * can estimate how much work needs to be done.
3483 : *
3484 : * This allows us to write only those pages that were dirty when the
3485 : * checkpoint began, and not those that get dirtied while it proceeds.
3486 : * Whenever a page with BM_CHECKPOINT_NEEDED is written out, either by us
3487 : * later in this function, or by normal backends or the bgwriter cleaning
3488 : * scan, the flag is cleared. Any buffer dirtied after this point won't
3489 : * have the flag set.
3490 : *
3491 : * Note that if we fail to write some buffer, we may leave buffers with
3492 : * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
3493 : * certainly need to be written for the next checkpoint attempt, too.
3494 : */
3495 1808 : num_to_scan = 0;
3496 12554976 : for (buf_id = 0; buf_id < NBuffers; buf_id++)
3497 : {
3498 12553168 : BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
3499 12553168 : uint64 set_bits = 0;
3500 :
3501 : /*
3502 : * Header spinlock is enough to examine BM_DIRTY, see comment in
3503 : * SyncOneBuffer.
3504 : */
3505 12553168 : buf_state = LockBufHdr(bufHdr);
3506 :
3507 12553168 : if ((buf_state & mask) == mask)
3508 : {
3509 : CkptSortItem *item;
3510 :
3511 302021 : set_bits = BM_CHECKPOINT_NEEDED;
3512 :
     : /*
     : * NOTE(review): CkptBufferIds is presumably sized to hold
     : * NBuffers entries, so num_to_scan cannot overrun it -- confirm.
     : */
3513 302021 : item = &CkptBufferIds[num_to_scan++];
3514 302021 : item->buf_id = buf_id;
3515 302021 : item->tsId = bufHdr->tag.spcOid;
3516 302021 : item->relNumber = BufTagGetRelNumber(&bufHdr->tag);
3517 302021 : item->forkNum = BufTagGetForkNum(&bufHdr->tag);
3518 302021 : item->blockNum = bufHdr->tag.blockNum;
3519 : }
3520 :
3521 12553168 : UnlockBufHdrExt(bufHdr, buf_state,
3522 : set_bits, 0,
3523 : 0);
3524 :
3525 : /* Check for barrier events in case NBuffers is large. */
3526 12553168 : if (ProcSignalBarrierPending)
3527 0 : ProcessProcSignalBarrier();
3528 : }
3529 :
3530 1808 : if (num_to_scan == 0)
3531 703 : return; /* nothing to do */
3532 :
3533 1105 : WritebackContextInit(&wb_context, &checkpoint_flush_after);
3534 :
3535 : TRACE_POSTGRESQL_BUFFER_SYNC_START(NBuffers, num_to_scan);
3536 :
3537 : /*
3538 : * Sort buffers that need to be written to reduce the likelihood of random
3539 : * IO. The sorting is also important for the implementation of balancing
3540 : * writes between tablespaces. Without balancing writes we'd potentially
3541 : * end up writing to the tablespaces one-by-one; possibly overloading the
3542 : * underlying system.
3543 : */
3544 1105 : sort_checkpoint_bufferids(CkptBufferIds, num_to_scan);
3545 :
3546 1105 : num_spaces = 0;
3547 :
3548 : /*
3549 : * Allocate progress status for each tablespace with buffers that need to
3550 : * be flushed. This requires the to-be-flushed array to be sorted.
3551 : */
3552 1105 : last_tsid = InvalidOid;
3553 303126 : for (i = 0; i < num_to_scan; i++)
3554 : {
3555 : CkptTsStatus *s;
3556 : Oid cur_tsid;
3557 :
3558 302021 : cur_tsid = CkptBufferIds[i].tsId;
3559 :
3560 : /*
3561 : * Grow array of per-tablespace status structs, every time a new
3562 : * tablespace is found.
3563 : */
3564 302021 : if (last_tsid == InvalidOid || last_tsid != cur_tsid)
3565 1680 : {
3566 : Size sz;
3567 :
3568 1680 : num_spaces++;
3569 :
3570 : /*
3571 : * Not worth adding grow-by-power-of-2 logic here - even with a
3572 : * few hundred tablespaces this should be fine.
3573 : */
3574 1680 : sz = sizeof(CkptTsStatus) * num_spaces;
3575 :
3576 1680 : if (per_ts_stat == NULL)
3577 1105 : per_ts_stat = (CkptTsStatus *) palloc(sz);
3578 : else
3579 575 : per_ts_stat = (CkptTsStatus *) repalloc(per_ts_stat, sz);
3580 :
3581 1680 : s = &per_ts_stat[num_spaces - 1];
3582 1680 : memset(s, 0, sizeof(*s));
3583 1680 : s->tsId = cur_tsid;
3584 :
3585 : /*
3586 : * The first buffer in this tablespace. As CkptBufferIds is sorted
3587 : * by tablespace all (s->num_to_scan) buffers in this tablespace
3588 : * will follow afterwards.
3589 : */
3590 1680 : s->index = i;
3591 :
3592 : /*
3593 : * progress_slice will be determined once we know how many buffers
3594 : * are in each tablespace, i.e. after this loop.
3595 : */
3596 :
3597 1680 : last_tsid = cur_tsid;
3598 : }
3599 : else
3600 : {
3601 300341 : s = &per_ts_stat[num_spaces - 1];
3602 : }
3603 :
3604 302021 : s->num_to_scan++;
3605 :
3606 : /* Check for barrier events. */
3607 302021 : if (ProcSignalBarrierPending)
3608 0 : ProcessProcSignalBarrier();
3609 : }
3610 :
3611 : Assert(num_spaces > 0);
3612 :
3613 : /*
3614 : * Build a min-heap over the write-progress in the individual tablespaces,
3615 : * and compute how large a portion of the total progress a single
3616 : * processed buffer is.
3617 : */
3618 1105 : ts_heap = binaryheap_allocate(num_spaces,
3619 : ts_ckpt_progress_comparator,
3620 : NULL);
3621 :
3622 2785 : for (i = 0; i < num_spaces; i++)
3623 : {
3624 1680 : CkptTsStatus *ts_stat = &per_ts_stat[i];
3625 :
3626 1680 : ts_stat->progress_slice = (float8) num_to_scan / ts_stat->num_to_scan;
3627 :
3628 1680 : binaryheap_add_unordered(ts_heap, PointerGetDatum(ts_stat));
3629 : }
3630 :
3631 1105 : binaryheap_build(ts_heap);
3632 :
3633 : /*
3634 : * Iterate through to-be-checkpointed buffers and write the ones (still)
3635 : * marked with BM_CHECKPOINT_NEEDED. The writes are balanced between
3636 : * tablespaces; otherwise the sorting would lead to only one tablespace
3637 : * receiving writes at a time, making inefficient use of the hardware.
3638 : */
3639 1105 : num_processed = 0;
3640 1105 : num_written = 0;
3641 303126 : while (!binaryheap_empty(ts_heap))
3642 : {
3643 302021 : BufferDesc *bufHdr = NULL;
3644 : CkptTsStatus *ts_stat = (CkptTsStatus *)
3645 302021 : DatumGetPointer(binaryheap_first(ts_heap));
3646 :
3647 302021 : buf_id = CkptBufferIds[ts_stat->index].buf_id;
3648 : Assert(buf_id != -1);
3649 :
3650 302021 : bufHdr = GetBufferDescriptor(buf_id);
3651 :
3652 302021 : num_processed++;
3653 :
3654 : /*
3655 : * We don't need to acquire the lock here, because we're only looking
3656 : * at a single bit. It's possible that someone else writes the buffer
3657 : * and clears the flag right after we check, but that doesn't matter
3658 : * since SyncOneBuffer will then do nothing. However, there is a
3659 : * further race condition: it's conceivable that between the time we
3660 : * examine the bit here and the time SyncOneBuffer acquires the lock,
3661 : * someone else not only wrote the buffer but replaced it with another
3662 : * page and dirtied it. In that improbable case, SyncOneBuffer will
3663 : * write the buffer though we didn't need to. It doesn't seem worth
3664 : * guarding against this, though.
3665 : */
3666 302021 : if (pg_atomic_read_u64(&bufHdr->state) & BM_CHECKPOINT_NEEDED)
3667 : {
3668 280915 : if (SyncOneBuffer(buf_id, false, &wb_context) & BUF_WRITTEN)
3669 : {
3670 : TRACE_POSTGRESQL_BUFFER_SYNC_WRITTEN(buf_id);
3671 280915 : PendingCheckpointerStats.buffers_written++;
3672 280915 : num_written++;
3673 : }
3674 : }
3675 :
3676 : /*
3677 : * Measure progress independent of actually having to flush the buffer
3678 : * - otherwise writing becomes unbalanced.
3679 : */
3680 302021 : ts_stat->progress += ts_stat->progress_slice;
3681 302021 : ts_stat->num_scanned++;
3682 302021 : ts_stat->index++;
3683 :
3684 : /* Have all the buffers from the tablespace been processed? */
3685 302021 : if (ts_stat->num_scanned == ts_stat->num_to_scan)
3686 : {
3687 1680 : binaryheap_remove_first(ts_heap);
3688 : }
3689 : else
3690 : {
3691 : /* update heap with the new progress */
3692 300341 : binaryheap_replace_first(ts_heap, PointerGetDatum(ts_stat));
3693 : }
3694 :
3695 : /*
3696 : * Sleep to throttle our I/O rate.
3697 : *
3698 : * (This will check for barrier events even if it doesn't sleep.)
3699 : */
3700 302021 : CheckpointWriteDelay(flags, (double) num_processed / num_to_scan);
3701 : }
3702 :
3703 : /*
3704 : * Issue all pending flushes. Only checkpointer calls BufferSync(), so
3705 : * IOContext will always be IOCONTEXT_NORMAL.
3706 : */
3707 1105 : IssuePendingWritebacks(&wb_context, IOCONTEXT_NORMAL);
3708 :
3709 1105 : pfree(per_ts_stat);
3710 1105 : per_ts_stat = NULL;
3711 1105 : binaryheap_free(ts_heap);
3712 :
3713 : /*
3714 : * Update checkpoint statistics. As noted above, this doesn't include
3715 : * buffers written by other backends or bgwriter scan.
3716 : */
3717 1105 : CheckpointStats.ckpt_bufs_written += num_written;
3718 :
3719 : TRACE_POSTGRESQL_BUFFER_SYNC_DONE(NBuffers, num_written, num_to_scan);
3720 : }
3721 :
3722 : /*
3723 : * BgBufferSync -- Write out some dirty buffers in the pool.
3724 : *
3725 : * This is called periodically by the background writer process.
3726 : *
3727 : * Returns true if it's appropriate for the bgwriter process to go into
3728 : * low-power hibernation mode. (This happens if the strategy clock-sweep
3729 : * has been "lapped" and no buffer allocations have occurred recently,
3730 : * or if the bgwriter has been effectively disabled by setting
3731 : * bgwriter_lru_maxpages to 0.)
3732 : */
3733 : bool
3734 13505 : BgBufferSync(WritebackContext *wb_context)
3735 : {
3736 : /* info obtained from freelist.c */
3737 : int strategy_buf_id;
3738 : uint32 strategy_passes;
3739 : uint32 recent_alloc;
3740 :
3741 : /*
3742 : * Information saved between calls so we can determine the strategy
3743 : * point's advance rate and avoid scanning already-cleaned buffers.
3744 : */
3745 : static bool saved_info_valid = false;
3746 : static int prev_strategy_buf_id;
3747 : static uint32 prev_strategy_passes;
3748 : static int next_to_clean;
3749 : static uint32 next_passes;
3750 :
3751 : /* Moving averages of allocation rate and clean-buffer density */
3752 : static float smoothed_alloc = 0;
3753 : static float smoothed_density = 10.0;
3754 :
3755 : /* Potentially these could be tunables, but for now, not */
3756 13505 : float smoothing_samples = 16;
3757 13505 : float scan_whole_pool_milliseconds = 120000.0;
3758 :
3759 : /* Used to compute how far we scan ahead */
3760 : long strategy_delta;
3761 : int bufs_to_lap;
3762 : int bufs_ahead;
3763 : float scans_per_alloc;
3764 : int reusable_buffers_est;
3765 : int upcoming_alloc_est;
3766 : int min_scan_buffers;
3767 :
3768 : /* Variables for the scanning loop proper */
3769 : int num_to_scan;
3770 : int num_written;
3771 : int reusable_buffers;
3772 :
3773 : /* Variables for final smoothed_density update */
3774 : long new_strategy_delta;
3775 : uint32 new_recent_alloc;
3776 :
3777 : /*
3778 : * Find out where the clock-sweep currently is, and how many buffer
3779 : * allocations have happened since our last call.
3780 : */
3781 13505 : strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
3782 :
3783 : /* Report buffer alloc counts to pgstat */
3784 13505 : PendingBgWriterStats.buf_alloc += recent_alloc;
3785 :
3786 : /*
3787 : * If we're not running the LRU scan, just stop after doing the stats
3788 : * stuff. We mark the saved state invalid so that we can recover sanely
3789 : * if LRU scan is turned back on later.
3790 : */
3791 13505 : if (bgwriter_lru_maxpages <= 0)
3792 : {
3793 35 : saved_info_valid = false;
3794 35 : return true;
3795 : }
3796 :
3797 : /*
3798 : * Compute strategy_delta = how many buffers have been scanned by the
3799 : * clock-sweep since last time. If first time through, assume none. Then
3800 : * see if we are still ahead of the clock-sweep, and if so, how many
3801 : * buffers we could scan before we'd catch up with it and "lap" it. Note:
3802 : * weird-looking coding of xxx_passes comparisons are to avoid bogus
3803 : * behavior when the passes counts wrap around.
3804 : */
3805 13470 : if (saved_info_valid)
3806 : {
     : /* signed subtraction of uint32s yields the correct delta at wraparound */
3807 12890 : int32 passes_delta = strategy_passes - prev_strategy_passes;
3808 :
3809 12890 : strategy_delta = strategy_buf_id - prev_strategy_buf_id;
3810 12890 : strategy_delta += (long) passes_delta * NBuffers;
3811 :
3812 : Assert(strategy_delta >= 0);
3813 :
3814 12890 : if ((int32) (next_passes - strategy_passes) > 0)
3815 : {
3816 : /* we're one pass ahead of the strategy point */
3817 2102 : bufs_to_lap = strategy_buf_id - next_to_clean;
3818 : #ifdef BGW_DEBUG
3819 : elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
3820 : next_passes, next_to_clean,
3821 : strategy_passes, strategy_buf_id,
3822 : strategy_delta, bufs_to_lap);
3823 : #endif
3824 : }
3825 10788 : else if (next_passes == strategy_passes &&
3826 8405 : next_to_clean >= strategy_buf_id)
3827 : {
3828 : /* on same pass, but ahead or at least not behind */
3829 7613 : bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
3830 : #ifdef BGW_DEBUG
3831 : elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
3832 : next_passes, next_to_clean,
3833 : strategy_passes, strategy_buf_id,
3834 : strategy_delta, bufs_to_lap);
3835 : #endif
3836 : }
3837 : else
3838 : {
3839 : /*
3840 : * We're behind, so skip forward to the strategy point and start
3841 : * cleaning from there.
3842 : */
3843 : #ifdef BGW_DEBUG
3844 : elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
3845 : next_passes, next_to_clean,
3846 : strategy_passes, strategy_buf_id,
3847 : strategy_delta);
3848 : #endif
3849 3175 : next_to_clean = strategy_buf_id;
3850 3175 : next_passes = strategy_passes;
3851 3175 : bufs_to_lap = NBuffers;
3852 : }
3853 : }
3854 : else
3855 : {
3856 : /*
3857 : * Initializing at startup or after LRU scanning had been off. Always
3858 : * start at the strategy point.
3859 : */
3860 : #ifdef BGW_DEBUG
3861 : elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
3862 : strategy_passes, strategy_buf_id);
3863 : #endif
3864 580 : strategy_delta = 0;
3865 580 : next_to_clean = strategy_buf_id;
3866 580 : next_passes = strategy_passes;
3867 580 : bufs_to_lap = NBuffers;
3868 : }
3869 :
3870 : /* Update saved info for next time */
3871 13470 : prev_strategy_buf_id = strategy_buf_id;
3872 13470 : prev_strategy_passes = strategy_passes;
3873 13470 : saved_info_valid = true;
3874 :
3875 : /*
3876 : * Compute how many buffers had to be scanned for each new allocation, ie,
3877 : * 1/density of reusable buffers, and track a moving average of that.
3878 : *
3879 : * If the strategy point didn't move, we don't update the density estimate.
3880 : */
3881 13470 : if (strategy_delta > 0 && recent_alloc > 0)
3882 : {
3883 6906 : scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
3884 6906 : smoothed_density += (scans_per_alloc - smoothed_density) /
3885 : smoothing_samples;
3886 : }
3887 :
3888 : /*
3889 : * Estimate how many reusable buffers there are between the current
3890 : * strategy point and where we've scanned ahead to, based on the smoothed
3891 : * density estimate.
3892 : */
3893 13470 : bufs_ahead = NBuffers - bufs_to_lap;
3894 13470 : reusable_buffers_est = (float) bufs_ahead / smoothed_density;
3895 :
3896 : /*
3897 : * Track a moving average of recent buffer allocations. Here, rather than
3898 : * a true average we want a fast-attack, slow-decline behavior: we
3899 : * immediately follow any increase.
3900 : */
3901 13470 : if (smoothed_alloc <= (float) recent_alloc)
3902 3787 : smoothed_alloc = recent_alloc;
3903 : else
3904 9683 : smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
3905 : smoothing_samples;
3906 :
3907 : /* Scale the estimate by a GUC to allow more aggressive tuning. */
3908 13470 : upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
3909 :
3910 : /*
3911 : * If recent_alloc remains at zero for many cycles, smoothed_alloc will
3912 : * eventually underflow to zero, and the underflows produce annoying
3913 : * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
3914 : * zero, there's no point in tracking smaller and smaller values of
3915 : * smoothed_alloc, so just reset it to exactly zero to avoid this
3916 : * syndrome. It will pop back up as soon as recent_alloc increases.
3917 : */
3918 13470 : if (upcoming_alloc_est == 0)
3919 2389 : smoothed_alloc = 0;
3920 :
3921 : /*
3922 : * Even in cases where there's been little or no buffer allocation
3923 : * activity, we want to make a small amount of progress through the buffer
3924 : * cache so that as many reusable buffers as possible are clean after an
3925 : * idle period.
3926 : *
3927 : * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
3928 : * the BGW will be called during the scan_whole_pool time; slice the
3929 : * buffer pool into that many sections.
3930 : */
3931 13470 : min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
3932 :
3933 13470 : if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
3934 : {
3935 : #ifdef BGW_DEBUG
3936 : elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
3937 : upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
3938 : #endif
3939 6414 : upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
3940 : }
3941 :
3942 : /*
3943 : * Now write out dirty reusable buffers, working forward from the
3944 : * next_to_clean point, until we have lapped the strategy scan, or cleaned
3945 : * enough buffers to match our estimate of the next cycle's allocation
3946 : * requirements, or hit the bgwriter_lru_maxpages limit.
3947 : */
3948 :
3949 13470 : num_to_scan = bufs_to_lap;
3950 13470 : num_written = 0;
3951 13470 : reusable_buffers = reusable_buffers_est;
3952 :
3953 : /* Execute the LRU scan */
3954 1929101 : while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
3955 : {
3956 1915631 : int sync_state = SyncOneBuffer(next_to_clean, true,
3957 : wb_context);
3958 :
3959 1915631 : if (++next_to_clean >= NBuffers)
3960 : {
3961 3020 : next_to_clean = 0;
3962 3020 : next_passes++;
3963 : }
3964 1915631 : num_to_scan--;
3965 :
3966 1915631 : if (sync_state & BUF_WRITTEN)
3967 : {
     : /*
     : * A written buffer also counts as reusable: with
     : * skip_recently_used set, SyncOneBuffer only writes buffers
     : * whose pin count and usage count are both zero.
     : */
3968 21765 : reusable_buffers++;
3969 21765 : if (++num_written >= bgwriter_lru_maxpages)
3970 : {
3971 0 : PendingBgWriterStats.maxwritten_clean++;
3972 0 : break;
3973 : }
3974 : }
3975 1893866 : else if (sync_state & BUF_REUSABLE)
3976 1476577 : reusable_buffers++;
3977 : }
3978 :
3979 13470 : PendingBgWriterStats.buf_written_clean += num_written;
3980 :
3981 : #ifdef BGW_DEBUG
3982 : elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
3983 : recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
3984 : smoothed_density, reusable_buffers_est, upcoming_alloc_est,
3985 : bufs_to_lap - num_to_scan,
3986 : num_written,
3987 : reusable_buffers - reusable_buffers_est);
3988 : #endif
3989 :
3990 : /*
3991 : * Consider the above scan as being like a new allocation scan.
3992 : * Characterize its density and update the smoothed one based on it. This
3993 : * effectively halves the moving average period in cases where both the
3994 : * strategy and the background writer are doing some useful scanning,
3995 : * which is helpful because a long memory isn't as desirable on the
3996 : * density estimates.
3997 : */
3998 13470 : new_strategy_delta = bufs_to_lap - num_to_scan;
3999 13470 : new_recent_alloc = reusable_buffers - reusable_buffers_est;
4000 13470 : if (new_strategy_delta > 0 && new_recent_alloc > 0)
4001 : {
4002 11308 : scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
4003 11308 : smoothed_density += (scans_per_alloc - smoothed_density) /
4004 : smoothing_samples;
4005 :
4006 : #ifdef BGW_DEBUG
4007 : elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
4008 : new_recent_alloc, new_strategy_delta,
4009 : scans_per_alloc, smoothed_density);
4010 : #endif
4011 : }
4012 :
4013 : /* Return true if OK to hibernate */
4014 13470 : return (bufs_to_lap == 0 && recent_alloc == 0);
4015 : }
4016 :
4017 : /*
4018 : * SyncOneBuffer -- process a single buffer during syncing.
4019 : *
4020 : * If skip_recently_used is true, we don't write currently-pinned buffers, nor
4021 : * buffers marked recently used, as these are not replacement candidates.
4022 : *
4023 : * Returns a bitmask containing the following flag bits:
4024 : * BUF_WRITTEN: we wrote the buffer.
4025 : * BUF_REUSABLE: buffer is available for replacement, ie, it has
4026 : * pin count 0 and usage count 0.
4027 : *
4028 : * (BUF_WRITTEN could be set in error if FlushBuffer finds the buffer clean
4029 : * after locking it, but we don't care all that much.)
4030 : */
4031 : static int
4032 2196546 : SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
4033 : {
4034 2196546 : BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
4035 2196546 : int result = 0;
4036 : uint64 buf_state;
4037 : BufferTag tag;
4038 :
4039 : /* Make sure we can handle the pin */
4040 2196546 : ReservePrivateRefCountEntry();
4041 2196546 : ResourceOwnerEnlarge(CurrentResourceOwner);
4042 :
4043 : /*
4044 : * Check whether buffer needs writing.
4045 : *
4046 : * We can make this check without taking the buffer content lock so long
4047 : * as we mark pages dirty in access methods *before* logging changes with
4048 : * XLogInsert(): if someone marks the buffer dirty just after our check we
4049 : * don't worry because our checkpoint.redo points before log record for
4050 : * upcoming changes and so we are not required to write such dirty buffer.
4051 : */
4052 2196546 : buf_state = LockBufHdr(bufHdr);
4053 :
4054 2196546 : if (BUF_STATE_GET_REFCOUNT(buf_state) == 0 &&
4055 2193474 : BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
4056 : {
4057 1500296 : result |= BUF_REUSABLE;
4058 : }
4059 696250 : else if (skip_recently_used)
4060 : {
4061 : /* Caller told us not to write recently-used buffers */
4062 417289 : UnlockBufHdr(bufHdr);
4063 417289 : return result;
4064 : }
4065 :
4066 1779257 : if (!(buf_state & BM_VALID) || !(buf_state & BM_DIRTY))
4067 : {
4068 : /* It's clean, so nothing to do */
4069 1476577 : UnlockBufHdr(bufHdr);
4070 1476577 : return result;
4071 : }
4072 :
4073 : /*
4074 : * Pin it, share-exclusive-lock it, write it. (FlushBuffer will do
4075 : * nothing if the buffer is clean by the time we've locked it.)
     : *
     : * NOTE(review): no explicit UnlockBufHdr() on this path, so
     : * PinBuffer_Locked() presumably releases the header spinlock taken
     : * above -- confirm against its definition.
4076 : */
4077 302680 : PinBuffer_Locked(bufHdr);
4078 :
4079 302680 : FlushUnlockedBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
4080 :
     : /* copy the tag before unpinning; needed for writeback scheduling below */
4081 302680 : tag = bufHdr->tag;
4082 :
4083 302680 : UnpinBuffer(bufHdr);
4084 :
4085 : /*
4086 : * SyncOneBuffer() is only called by checkpointer and bgwriter, so
4087 : * IOContext will always be IOCONTEXT_NORMAL.
4088 : */
4089 302680 : ScheduleBufferTagForWriteback(wb_context, IOCONTEXT_NORMAL, &tag);
4090 :
4091 302680 : return result | BUF_WRITTEN;
4092 : }
4093 :
4094 : /*
4095 : * AtEOXact_Buffers - clean up at end of transaction.
4096 : *
4097 : * As of PostgreSQL 8.0, buffer pins should get released by the
4098 : * ResourceOwner mechanism. This routine is just a debugging
4099 : * cross-check that no pins remain.
4100 : */
4101 : void
4102 564496 : AtEOXact_Buffers(bool isCommit)
4103 : {
     : /* shared-buffer pin leak check (assert builds only; see CheckForBufferLeaks) */
4104 564496 : CheckForBufferLeaks();
4105 :
     : /* give local (temp-relation) buffers the same end-of-xact treatment */
4106 564496 : AtEOXact_LocalBuffers(isCommit);
4107 :
4108 : Assert(PrivateRefCountOverflowed == 0);
4109 564496 : }
4110 :
4111 : /*
4112 : * Initialize access to shared buffer pool
4113 : *
4114 : * This is called during backend startup (whether standalone or under the
4115 : * postmaster). It sets up for this backend's access to the already-existing
4116 : * buffer pool.
4117 : */
4118 : void
4119 23460 : InitBufferManagerAccess(void)
4120 : {
4121 : HASHCTL hash_ctl;
4122 :
4123 : /*
4124 : * An advisory limit on the number of pins each backend should hold, based
4125 : * on shared_buffers and the maximum number of connections possible.
4126 : * That's very pessimistic, but outside toy-sized shared_buffers it should
4127 : * allow plenty of pins. LimitAdditionalPins() and
4128 : * GetAdditionalPinLimit() can be used to check the remaining balance.
4129 : */
4130 23460 : MaxProportionalPins = NBuffers / (MaxBackends + NUM_AUXILIARY_PROCS);
4131 :
     : /* start with an empty fixed-size private refcount array */
4132 23460 : memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
4133 23460 : memset(&PrivateRefCountArrayKeys, 0, sizeof(PrivateRefCountArrayKeys));
4134 :
     : /* overflow hash table, used when the fixed array fills up */
4135 23460 : hash_ctl.keysize = sizeof(Buffer);
4136 23460 : hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
4137 :
4138 23460 : PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
4139 : HASH_ELEM | HASH_BLOBS);
4140 :
4141 : /*
4142 : * AtProcExit_Buffers needs LWLock access, and thereby has to be called at
4143 : * the corresponding phase of backend shutdown.
4144 : */
4145 : Assert(MyProc != NULL);
4146 23460 : on_shmem_exit(AtProcExit_Buffers, 0);
4147 23460 : }
4148 :
4149 : /*
4150 : * During backend exit, ensure that we released all shared-buffer locks and
4151 : * assert that we have no remaining pins.
     : *
     : * Registered via on_shmem_exit() in InitBufferManagerAccess(); code and
     : * arg are the standard shmem-exit callback parameters and are unused.
4152 : */
4153 : static void
4154 23460 : AtProcExit_Buffers(int code, Datum arg)
4155 : {
4156 23460 : UnlockBuffers();
4157 :
4158 23460 : CheckForBufferLeaks();
4159 :
4160 : /* localbuf.c needs a chance too */
4161 23460 : AtProcExit_LocalBuffers();
4162 23460 : }
4163 :
4164 : /*
4165 : * CheckForBufferLeaks - ensure this backend holds no buffer pins
4166 : *
4167 : * As of PostgreSQL 8.0, buffer pins should get released by the
4168 : * ResourceOwner mechanism. This routine is just a debugging
4169 : * cross-check that no pins remain.
     : *
     : * A no-op in non-assert builds; in assert builds, each leaked pin is
     : * reported with a WARNING before the final Assert fires.
4170 : */
4171 : static void
4172 587956 : CheckForBufferLeaks(void)
4173 : {
4174 : #ifdef USE_ASSERT_CHECKING
4175 : int RefCountErrors = 0;
4176 : PrivateRefCountEntry *res;
4177 : int i;
4178 : char *s;
4179 :
4180 : /* check the array */
4181 : for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
4182 : {
4183 : if (PrivateRefCountArrayKeys[i] != InvalidBuffer)
4184 : {
4185 : res = &PrivateRefCountArray[i];
4186 :
4187 : s = DebugPrintBufferRefcount(res->buffer);
4188 : elog(WARNING, "buffer refcount leak: %s", s);
4189 : pfree(s);
4190 :
4191 : RefCountErrors++;
4192 : }
4193 : }
4194 :
4195 : /* if necessary search the hash */
4196 : if (PrivateRefCountOverflowed)
4197 : {
4198 : HASH_SEQ_STATUS hstat;
4199 :
4200 : hash_seq_init(&hstat, PrivateRefCountHash);
4201 : while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
4202 : {
4203 : s = DebugPrintBufferRefcount(res->buffer);
4204 : elog(WARNING, "buffer refcount leak: %s", s);
4205 : pfree(s);
4206 : RefCountErrors++;
4207 : }
4208 : }
4209 :
4210 : Assert(RefCountErrors == 0);
4211 : #endif
4212 : }
4213 :
4214 : #ifdef USE_ASSERT_CHECKING
4215 : /*
4216 : * Check for exclusive-locked catalog buffers. This is the core of
4217 : * AssertCouldGetRelation().
4218 : *
4219 : * A backend would self-deadlock on the content lock if the catalog scan read
4220 : * the exclusive-locked buffer. The main threat is exclusive-locked buffers
4221 : * of catalogs used in relcache, because a catcache search on any catalog may
4222 : * build that catalog's relcache entry. We don't have an inventory of
4223 : * catalogs relcache uses, so just check buffers of most catalogs.
4224 : *
4225 : * It's better to minimize waits while holding an exclusive buffer lock, so it
4226 : * would be nice to broaden this check not to be catalog-specific. However,
4227 : * bttextcmp() accesses pg_collation, and non-core opclasses might similarly
4228 : * read tables. That is deadlock-free as long as there's no loop in the
4229 : * dependency graph: modifying table A may cause an opclass to read table B,
4230 : * but it must not cause a read of table A.
4231 : */
4232 : void
4233 : AssertBufferLocksPermitCatalogRead(void)
4234 : {
4235 : PrivateRefCountEntry *res;
4236 :
4237 : /* check the array */
4238 : for (int i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
4239 : {
4240 : if (PrivateRefCountArrayKeys[i] != InvalidBuffer)
4241 : {
4242 : res = &PrivateRefCountArray[i];
4243 :
4244 : if (res->buffer == InvalidBuffer)
4245 : continue;
4246 :
4247 : AssertNotCatalogBufferLock(res->buffer, res->data.lockmode);
4248 : }
4249 : }
4250 :
4251 : /* if necessary search the hash */
4252 : if (PrivateRefCountOverflowed)
4253 : {
4254 : HASH_SEQ_STATUS hstat;
4255 :
4256 : hash_seq_init(&hstat, PrivateRefCountHash);
4257 : while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
4258 : {
4259 : AssertNotCatalogBufferLock(res->buffer, res->data.lockmode);
4260 : }
4261 : }
4262 : }
4263 :
     : /*
     : * Assert that the given buffer lock does not pose a catalog-read deadlock
     : * hazard: only exclusive locks are of interest, and only on buffers whose
     : * relNumber identifies a catalog relation.
     : */
4264 : static void
4265 : AssertNotCatalogBufferLock(Buffer buffer, BufferLockMode mode)
4266 : {
4267 : BufferDesc *bufHdr = GetBufferDescriptor(buffer - 1);
4268 : BufferTag tag;
4269 : Oid relid;
4270 :
     : /* shared/content locks other than exclusive can't self-deadlock here */
4271 : if (mode != BUFFER_LOCK_EXCLUSIVE)
4272 : return;
4273 :
4274 : tag = bufHdr->tag;
4275 :
4276 : /*
4277 : * This relNumber==relid assumption holds until a catalog experiences
4278 : * VACUUM FULL or similar. After a command like that, relNumber will be
4279 : * in the normal (non-catalog) range, and we lose the ability to detect
4280 : * hazardous access to that catalog. Calling RelidByRelfilenumber() would
4281 : * close that gap, but RelidByRelfilenumber() might then deadlock with a
4282 : * held lock.
4283 : */
4284 : relid = tag.relNumber;
4285 :
4286 : if (IsCatalogTextUniqueIndexOid(relid)) /* see comments at the callee */
4287 : return;
4288 :
4289 : Assert(!IsCatalogRelationOid(relid));
4290 : }
4291 : #endif
4292 :
4293 :
4294 : /*
4295 : * Helper routine to issue warnings when a buffer is unexpectedly pinned
     : *
     : * Builds and returns a palloc'd description of the buffer's relation, block
     : * number, flags, shared refcount and this backend's local refcount; the
     : * caller is responsible for pfree'ing the result.
4296 : */
4297 : char *
4298 40 : DebugPrintBufferRefcount(Buffer buffer)
4299 : {
4300 : BufferDesc *buf;
4301 : int32 loccount;
4302 : char *result;
4303 : ProcNumber backend;
4304 : uint64 buf_state;
4305 :
4306 : Assert(BufferIsValid(buffer));
4307 40 : if (BufferIsLocal(buffer))
4308 : {
     : /* local buffers use negative Buffer values; -1 maps to array index 0 */
4309 16 : buf = GetLocalBufferDescriptor(-buffer - 1);
4310 16 : loccount = LocalRefCount[-buffer - 1];
4311 16 : backend = MyProcNumber;
4312 : }
4313 : else
4314 : {
4315 24 : buf = GetBufferDescriptor(buffer - 1);
4316 24 : loccount = GetPrivateRefCount(buffer);
4317 24 : backend = INVALID_PROC_NUMBER;
4318 : }
4319 :
4320 : /* theoretically we should lock the bufHdr here */
4321 40 : buf_state = pg_atomic_read_u64(&buf->state);
4322 :
4323 40 : result = psprintf("[%03d] (rel=%s, blockNum=%u, flags=0x%" PRIx64 ", refcount=%u %d)",
4324 : buffer,
4325 40 : relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
4326 : BufTagGetForkNum(&buf->tag)).str,
4327 : buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
4328 : BUF_STATE_GET_REFCOUNT(buf_state), loccount);
4329 40 : return result;
4330 : }
4331 :
4332 : /*
4333 : * CheckPointBuffers
4334 : *
4335 : * Flush all dirty blocks in buffer pool to disk at checkpoint time.
4336 : *
4337 : * Note: temporary relations do not participate in checkpoints, so they don't
4338 : * need to be flushed.
4339 : */
void
CheckPointBuffers(int flags)
{
	/* All the real work happens in BufferSync(); flags are passed through. */
	BufferSync(flags);
}
4345 :
4346 : /*
4347 : * BufferGetBlockNumber
4348 : * Returns the block number associated with a buffer.
4349 : *
4350 : * Note:
4351 : * Assumes that the buffer is valid and pinned, else the
4352 : * value may be obsolete immediately...
4353 : */
4354 : BlockNumber
4355 52271506 : BufferGetBlockNumber(Buffer buffer)
4356 : {
4357 : BufferDesc *bufHdr;
4358 :
4359 : Assert(BufferIsPinned(buffer));
4360 :
4361 52271506 : if (BufferIsLocal(buffer))
4362 1909397 : bufHdr = GetLocalBufferDescriptor(-buffer - 1);
4363 : else
4364 50362109 : bufHdr = GetBufferDescriptor(buffer - 1);
4365 :
4366 : /* pinned, so OK to read tag without spinlock */
4367 52271506 : return bufHdr->tag.blockNum;
4368 : }
4369 :
4370 : /*
4371 : * BufferGetTag
4372 : * Returns the relfilelocator, fork number and block number associated with
4373 : * a buffer.
4374 : */
4375 : void
4376 16005355 : BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum,
4377 : BlockNumber *blknum)
4378 : {
4379 : BufferDesc *bufHdr;
4380 :
4381 : /* Do the same checks as BufferGetBlockNumber. */
4382 : Assert(BufferIsPinned(buffer));
4383 :
4384 16005355 : if (BufferIsLocal(buffer))
4385 0 : bufHdr = GetLocalBufferDescriptor(-buffer - 1);
4386 : else
4387 16005355 : bufHdr = GetBufferDescriptor(buffer - 1);
4388 :
4389 : /* pinned, so OK to read tag without spinlock */
4390 16005355 : *rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
4391 16005355 : *forknum = BufTagGetForkNum(&bufHdr->tag);
4392 16005355 : *blknum = bufHdr->tag.blockNum;
4393 16005355 : }
4394 :
4395 : /*
4396 : * FlushBuffer
4397 : * Physically write out a shared buffer.
4398 : *
4399 : * NOTE: this actually just passes the buffer contents to the kernel; the
4400 : * real write to disk won't happen until the kernel feels like it. This
4401 : * is okay from our point of view since we can redo the changes from WAL.
4402 : * However, we will need to force the changes to disk via fsync before
4403 : * we can checkpoint WAL.
4404 : *
4405 : * The caller must hold a pin on the buffer and have
4406 : * (share-)exclusively-locked the buffer contents.
4407 : *
4408 : * If the caller has an smgr reference for the buffer's relation, pass it
4409 : * as the second parameter. If not, pass NULL.
4410 : */
4411 : static void
4412 579405 : FlushBuffer(BufferDesc *buf, SMgrRelation reln, IOObject io_object,
4413 : IOContext io_context)
4414 : {
4415 : XLogRecPtr recptr;
4416 : ErrorContextCallback errcallback;
4417 : instr_time io_start;
4418 : Block bufBlock;
4419 : char *bufToWrite;
4420 : uint64 buf_state;
4421 :
4422 : Assert(BufferLockHeldByMeInMode(buf, BUFFER_LOCK_EXCLUSIVE) ||
4423 : BufferLockHeldByMeInMode(buf, BUFFER_LOCK_SHARE_EXCLUSIVE));
4424 :
4425 : /*
4426 : * Try to start an I/O operation. If StartBufferIO returns false, then
4427 : * someone else flushed the buffer before we could, so we need not do
4428 : * anything.
4429 : */
4430 579405 : if (!StartBufferIO(buf, false, false))
4431 14 : return;
4432 :
4433 : /* Setup error traceback support for ereport() */
4434 579391 : errcallback.callback = shared_buffer_write_error_callback;
4435 579391 : errcallback.arg = buf;
4436 579391 : errcallback.previous = error_context_stack;
4437 579391 : error_context_stack = &errcallback;
4438 :
4439 : /* Find smgr relation for buffer */
4440 579391 : if (reln == NULL)
4441 576831 : reln = smgropen(BufTagGetRelFileLocator(&buf->tag), INVALID_PROC_NUMBER);
4442 :
4443 : TRACE_POSTGRESQL_BUFFER_FLUSH_START(BufTagGetForkNum(&buf->tag),
4444 : buf->tag.blockNum,
4445 : reln->smgr_rlocator.locator.spcOid,
4446 : reln->smgr_rlocator.locator.dbOid,
4447 : reln->smgr_rlocator.locator.relNumber);
4448 :
4449 579391 : buf_state = LockBufHdr(buf);
4450 :
4451 : /*
4452 : * As we hold at least a share-exclusive lock on the buffer, the LSN
4453 : * cannot change during the flush (and thus can't be torn).
4454 : */
4455 579391 : recptr = BufferGetLSN(buf);
4456 :
4457 : /* To check if block content changes while flushing. - vadim 01/17/97 */
4458 579391 : UnlockBufHdrExt(buf, buf_state,
4459 : 0, BM_JUST_DIRTIED,
4460 : 0);
4461 :
4462 : /*
4463 : * Force XLOG flush up to buffer's LSN. This implements the basic WAL
4464 : * rule that log updates must hit disk before any of the data-file changes
4465 : * they describe do.
4466 : *
4467 : * However, this rule does not apply to unlogged relations, which will be
4468 : * lost after a crash anyway. Most unlogged relation pages do not bear
4469 : * LSNs since we never emit WAL records for them, and therefore flushing
4470 : * up through the buffer LSN would be useless, but harmless. However,
4471 : * GiST indexes use LSNs internally to track page-splits, and therefore
4472 : * unlogged GiST pages bear "fake" LSNs generated by
4473 : * GetFakeLSNForUnloggedRel. It is unlikely but possible that the fake
4474 : * LSN counter could advance past the WAL insertion point; and if it did
4475 : * happen, attempting to flush WAL through that location would fail, with
4476 : * disastrous system-wide consequences. To make sure that can't happen,
4477 : * skip the flush if the buffer isn't permanent.
4478 : */
4479 579391 : if (buf_state & BM_PERMANENT)
4480 577591 : XLogFlush(recptr);
4481 :
4482 : /*
4483 : * Now it's safe to write the buffer to disk. Note that no one else should
4484 : * have been able to write it, while we were busy with log flushing,
4485 : * because we got the exclusive right to perform I/O by setting the
4486 : * BM_IO_IN_PROGRESS bit.
4487 : */
4488 579391 : bufBlock = BufHdrGetBlock(buf);
4489 :
4490 : /*
4491 : * Update page checksum if desired. Since we have only shared lock on the
4492 : * buffer, other processes might be updating hint bits in it, so we must
4493 : * copy the page to private storage if we do checksumming.
4494 : */
4495 579391 : bufToWrite = PageSetChecksumCopy((Page) bufBlock, buf->tag.blockNum);
4496 :
4497 579391 : io_start = pgstat_prepare_io_time(track_io_timing);
4498 :
4499 : /*
4500 : * bufToWrite is either the shared buffer or a copy, as appropriate.
4501 : */
4502 579391 : smgrwrite(reln,
4503 579391 : BufTagGetForkNum(&buf->tag),
4504 : buf->tag.blockNum,
4505 : bufToWrite,
4506 : false);
4507 :
4508 : /*
4509 : * When a strategy is in use, only flushes of dirty buffers already in the
4510 : * strategy ring are counted as strategy writes (IOCONTEXT
4511 : * [BULKREAD|BULKWRITE|VACUUM] IOOP_WRITE) for the purpose of IO
4512 : * statistics tracking.
4513 : *
4514 : * If a shared buffer initially added to the ring must be flushed before
4515 : * being used, this is counted as an IOCONTEXT_NORMAL IOOP_WRITE.
4516 : *
4517 : * If a shared buffer which was added to the ring later because the
4518 : * current strategy buffer is pinned or in use or because all strategy
4519 : * buffers were dirty and rejected (for BAS_BULKREAD operations only)
4520 : * requires flushing, this is counted as an IOCONTEXT_NORMAL IOOP_WRITE
4521 : * (from_ring will be false).
4522 : *
4523 : * When a strategy is not in use, the write can only be a "regular" write
4524 : * of a dirty shared buffer (IOCONTEXT_NORMAL IOOP_WRITE).
4525 : */
4526 579391 : pgstat_count_io_op_time(IOOBJECT_RELATION, io_context,
4527 : IOOP_WRITE, io_start, 1, BLCKSZ);
4528 :
4529 579391 : pgBufferUsage.shared_blks_written++;
4530 :
4531 : /*
4532 : * Mark the buffer as clean (unless BM_JUST_DIRTIED has become set) and
4533 : * end the BM_IO_IN_PROGRESS state.
4534 : */
4535 579391 : TerminateBufferIO(buf, true, 0, true, false);
4536 :
4537 : TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(BufTagGetForkNum(&buf->tag),
4538 : buf->tag.blockNum,
4539 : reln->smgr_rlocator.locator.spcOid,
4540 : reln->smgr_rlocator.locator.dbOid,
4541 : reln->smgr_rlocator.locator.relNumber);
4542 :
4543 : /* Pop the error context stack */
4544 579391 : error_context_stack = errcallback.previous;
4545 : }
4546 :
4547 : /*
4548 : * Convenience wrapper around FlushBuffer() that locks/unlocks the buffer
4549 : * before/after calling FlushBuffer().
4550 : */
4551 : static void
4552 306239 : FlushUnlockedBuffer(BufferDesc *buf, SMgrRelation reln,
4553 : IOObject io_object, IOContext io_context)
4554 : {
4555 306239 : Buffer buffer = BufferDescriptorGetBuffer(buf);
4556 :
4557 306239 : BufferLockAcquire(buffer, buf, BUFFER_LOCK_SHARE_EXCLUSIVE);
4558 306239 : FlushBuffer(buf, reln, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
4559 306239 : BufferLockUnlock(buffer, buf);
4560 306239 : }
4561 :
4562 : /*
4563 : * RelationGetNumberOfBlocksInFork
4564 : * Determines the current number of pages in the specified relation fork.
4565 : *
4566 : * Note that the accuracy of the result will depend on the details of the
4567 : * relation's storage. For builtin AMs it'll be accurate, but for external AMs
4568 : * it might not be.
4569 : */
4570 : BlockNumber
4571 2225333 : RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
4572 : {
4573 2225333 : if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
4574 : {
4575 : /*
4576 : * Not every table AM uses BLCKSZ wide fixed size blocks. Therefore
4577 : * tableam returns the size in bytes - but for the purpose of this
4578 : * routine, we want the number of blocks. Therefore divide, rounding
4579 : * up.
4580 : */
4581 : uint64 szbytes;
4582 :
4583 1722031 : szbytes = table_relation_size(relation, forkNum);
4584 :
4585 1722012 : return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
4586 : }
4587 503302 : else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
4588 : {
4589 503302 : return smgrnblocks(RelationGetSmgr(relation), forkNum);
4590 : }
4591 : else
4592 : Assert(false);
4593 :
4594 0 : return 0; /* keep compiler quiet */
4595 : }
4596 :
4597 : /*
4598 : * BufferIsPermanent
4599 : * Determines whether a buffer will potentially still be around after
4600 : * a crash. Caller must hold a buffer pin.
4601 : */
4602 : bool
4603 9560942 : BufferIsPermanent(Buffer buffer)
4604 : {
4605 : BufferDesc *bufHdr;
4606 :
4607 : /* Local buffers are used only for temp relations. */
4608 9560942 : if (BufferIsLocal(buffer))
4609 630173 : return false;
4610 :
4611 : /* Make sure we've got a real buffer, and that we hold a pin on it. */
4612 : Assert(BufferIsValid(buffer));
4613 : Assert(BufferIsPinned(buffer));
4614 :
4615 : /*
4616 : * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
4617 : * need not bother with the buffer header spinlock. Even if someone else
4618 : * changes the buffer header state while we're doing this, the state is
4619 : * changed atomically, so we'll read the old value or the new value, but
4620 : * not random garbage.
4621 : */
4622 8930769 : bufHdr = GetBufferDescriptor(buffer - 1);
4623 8930769 : return (pg_atomic_read_u64(&bufHdr->state) & BM_PERMANENT) != 0;
4624 : }
4625 :
4626 : /*
4627 : * BufferGetLSNAtomic
4628 : * Retrieves the LSN of the buffer atomically using a buffer header lock.
4629 : * This is necessary for some callers who may only hold a share lock on
4630 : * the buffer. A share lock allows a concurrent backend to set hint bits
4631 : * on the page, which in turn may require a WAL record to be emitted.
4632 : */
4633 : XLogRecPtr
4634 7367639 : BufferGetLSNAtomic(Buffer buffer)
4635 : {
4636 7367639 : char *page = BufferGetPage(buffer);
4637 : BufferDesc *bufHdr;
4638 : XLogRecPtr lsn;
4639 :
4640 : /*
4641 : * If we don't need locking for correctness, fastpath out.
4642 : */
4643 7367639 : if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
4644 238151 : return PageGetLSN(page);
4645 :
4646 : /* Make sure we've got a real buffer, and that we hold a pin on it. */
4647 : Assert(BufferIsValid(buffer));
4648 : Assert(BufferIsPinned(buffer));
4649 :
4650 7129488 : bufHdr = GetBufferDescriptor(buffer - 1);
4651 7129488 : LockBufHdr(bufHdr);
4652 7129488 : lsn = PageGetLSN(page);
4653 7129488 : UnlockBufHdr(bufHdr);
4654 :
4655 7129488 : return lsn;
4656 : }
4657 :
4658 : /* ---------------------------------------------------------------------
4659 : * DropRelationBuffers
4660 : *
4661 : * This function removes from the buffer pool all the pages of the
4662 : * specified relation forks that have block numbers >= firstDelBlock.
4663 : * (In particular, with firstDelBlock = 0, all pages are removed.)
4664 : * Dirty pages are simply dropped, without bothering to write them
4665 : * out first. Therefore, this is NOT rollback-able, and so should be
4666 : * used only with extreme caution!
4667 : *
4668 : * Currently, this is called only from smgr.c when the underlying file
4669 : * is about to be deleted or truncated (firstDelBlock is needed for
4670 : * the truncation case). The data in the affected pages would therefore
4671 : * be deleted momentarily anyway, and there is no point in writing it.
4672 : * It is the responsibility of higher-level code to ensure that the
4673 : * deletion or truncation does not lose any data that could be needed
4674 : * later. It is also the responsibility of higher-level code to ensure
4675 : * that no other process could be trying to load more pages of the
4676 : * relation into buffers.
4677 : * --------------------------------------------------------------------
4678 : */
void
DropRelationBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
					int nforks, BlockNumber *firstDelBlock)
{
	int			i;
	int			j;
	RelFileLocatorBackend rlocator;
	BlockNumber nForkBlock[MAX_FORKNUM];
	uint64		nBlocksToInvalidate = 0;

	rlocator = smgr_reln->smgr_rlocator;

	/* If it's a local relation, it's localbuf.c's problem. */
	if (RelFileLocatorBackendIsTemp(rlocator))
	{
		if (rlocator.backend == MyProcNumber)
			DropRelationLocalBuffers(rlocator.locator, forkNum, nforks,
									 firstDelBlock);

		return;
	}

	/*
	 * To remove all the pages of the specified relation forks from the buffer
	 * pool, we need to scan the entire buffer pool but we can optimize it by
	 * finding the buffers from the BufMapping table, provided we know the
	 * exact size of each fork of the relation. The exact size is required to
	 * ensure that we don't leave any buffer for the relation being dropped;
	 * otherwise the background writer or checkpointer could hit a PANIC
	 * while flushing buffers corresponding to files that don't exist.
	 *
	 * To know the exact size, we rely on the size cached for each fork by us
	 * during recovery, which limits the optimization to recovery and to
	 * standbys; but we can easily extend it once we have a shared cache for
	 * relation size.
	 *
	 * In recovery, we cache the value returned by the first lseek(SEEK_END),
	 * and future writes keep the cached value up-to-date. See smgrextend. It
	 * is possible that the value of the first lseek is smaller than the
	 * actual number of existing blocks in the file due to buggy Linux
	 * kernels that might not have accounted for the recent write. But that
	 * should be fine because there must not be any buffers after that file
	 * size.
	 */
	for (i = 0; i < nforks; i++)
	{
		/* Get the number of blocks for a relation's fork */
		nForkBlock[i] = smgrnblocks_cached(smgr_reln, forkNum[i]);

		/* No cached size: fall back to the full buffer-pool scan below. */
		if (nForkBlock[i] == InvalidBlockNumber)
		{
			nBlocksToInvalidate = InvalidBlockNumber;
			break;
		}

		/* calculate the number of blocks to be invalidated */
		nBlocksToInvalidate += (nForkBlock[i] - firstDelBlock[i]);
	}

	/*
	 * We apply the optimization iff the total number of blocks to invalidate
	 * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
	 */
	if (BlockNumberIsValid(nBlocksToInvalidate) &&
		nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
	{
		for (j = 0; j < nforks; j++)
			FindAndDropRelationBuffers(rlocator.locator, forkNum[j],
									   nForkBlock[j], firstDelBlock[j]);
		return;
	}

	/* Slow path: scan every shared buffer. */
	for (i = 0; i < NBuffers; i++)
	{
		BufferDesc *bufHdr = GetBufferDescriptor(i);

		/*
		 * We can make this a tad faster by prechecking the buffer tag before
		 * we attempt to lock the buffer; this saves a lot of lock
		 * acquisitions in typical cases. It should be safe because the
		 * caller must have AccessExclusiveLock on the relation, or some other
		 * reason to be certain that no one is loading new pages of the rel
		 * into the buffer pool. (Otherwise we might well miss such pages
		 * entirely.) Therefore, while the tag might be changing while we
		 * look at it, it can't be changing *to* a value we care about, only
		 * *away* from such a value. So false negatives are impossible, and
		 * false positives are safe because we'll recheck after getting the
		 * buffer lock.
		 *
		 * We could check forkNum and blockNum as well as the rlocator, but
		 * the incremental win from doing so seems small.
		 */
		if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator))
			continue;

		LockBufHdr(bufHdr);

		/* Recheck under the header lock; drop if any fork/range matches. */
		for (j = 0; j < nforks; j++)
		{
			if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator) &&
				BufTagGetForkNum(&bufHdr->tag) == forkNum[j] &&
				bufHdr->tag.blockNum >= firstDelBlock[j])
			{
				InvalidateBuffer(bufHdr);	/* releases spinlock */
				break;
			}
		}
		/* j >= nforks means no match: InvalidateBuffer didn't run, unlock. */
		if (j >= nforks)
			UnlockBufHdr(bufHdr);
	}
}
4790 :
4791 : /* ---------------------------------------------------------------------
4792 : * DropRelationsAllBuffers
4793 : *
4794 : * This function removes from the buffer pool all the pages of all
4795 : * forks of the specified relations. It's equivalent to calling
4796 : * DropRelationBuffers once per fork per relation with firstDelBlock = 0.
4797 : * --------------------------------------------------------------------
4798 : */
void
DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
{
	int			i;
	int			n = 0;
	SMgrRelation *rels;
	BlockNumber (*block)[MAX_FORKNUM + 1];
	uint64		nBlocksToInvalidate = 0;
	RelFileLocator *locators;
	bool		cached = true;
	bool		use_bsearch;

	if (nlocators == 0)
		return;

	rels = palloc_array(SMgrRelation, nlocators);	/* non-local relations */

	/* If it's a local relation, it's localbuf.c's problem. */
	for (i = 0; i < nlocators; i++)
	{
		if (RelFileLocatorBackendIsTemp(smgr_reln[i]->smgr_rlocator))
		{
			if (smgr_reln[i]->smgr_rlocator.backend == MyProcNumber)
				DropRelationAllLocalBuffers(smgr_reln[i]->smgr_rlocator.locator);
		}
		else
			rels[n++] = smgr_reln[i];
	}

	/*
	 * If there are no non-local relations, then we're done. Release the
	 * memory and return.
	 */
	if (n == 0)
	{
		pfree(rels);
		return;
	}

	/*
	 * This is used to remember the number of blocks for all the relations'
	 * forks.
	 */
	block = (BlockNumber (*)[MAX_FORKNUM + 1])
		palloc(sizeof(BlockNumber) * n * (MAX_FORKNUM + 1));

	/*
	 * We can avoid scanning the entire buffer pool if we know the exact size
	 * of each of the given relation forks. See DropRelationBuffers.
	 */
	for (i = 0; i < n && cached; i++)
	{
		for (int j = 0; j <= MAX_FORKNUM; j++)
		{
			/* Get the number of blocks for a relation's fork. */
			block[i][j] = smgrnblocks_cached(rels[i], j);

			/* We only need to consider the relation forks that exist. */
			if (block[i][j] == InvalidBlockNumber)
			{
				/* nonexistent fork: skip; uncached existing fork: give up */
				if (!smgrexists(rels[i], j))
					continue;
				cached = false;
				break;
			}

			/* calculate the total number of blocks to be invalidated */
			nBlocksToInvalidate += block[i][j];
		}
	}

	/*
	 * We apply the optimization iff the total number of blocks to invalidate
	 * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
	 */
	if (cached && nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
	{
		for (i = 0; i < n; i++)
		{
			for (int j = 0; j <= MAX_FORKNUM; j++)
			{
				/* ignore relation forks that don't exist */
				if (!BlockNumberIsValid(block[i][j]))
					continue;

				/* drop all the buffers for a particular relation fork */
				FindAndDropRelationBuffers(rels[i]->smgr_rlocator.locator,
										   j, block[i][j], 0);
			}
		}

		pfree(block);
		pfree(rels);
		return;
	}

	/* Slow path: one pass over the whole buffer pool. */
	pfree(block);
	locators = palloc_array(RelFileLocator, n); /* non-local relations */
	for (i = 0; i < n; i++)
		locators[i] = rels[i]->smgr_rlocator.locator;

	/*
	 * For a low number of relations to drop just use a simple walk-through,
	 * to save the bsearch overhead. The threshold to use is rather a guess
	 * than an exactly determined value, as it depends on many factors (CPU
	 * and RAM speeds, amount of shared buffers etc.).
	 */
	use_bsearch = n > RELS_BSEARCH_THRESHOLD;

	/* sort the list of rlocators if necessary */
	if (use_bsearch)
		qsort(locators, n, sizeof(RelFileLocator), rlocator_comparator);

	for (i = 0; i < NBuffers; i++)
	{
		RelFileLocator *rlocator = NULL;
		BufferDesc *bufHdr = GetBufferDescriptor(i);

		/*
		 * As in DropRelationBuffers, an unlocked precheck should be safe and
		 * saves some cycles.
		 */

		if (!use_bsearch)
		{
			int			j;

			for (j = 0; j < n; j++)
			{
				if (BufTagMatchesRelFileLocator(&bufHdr->tag, &locators[j]))
				{
					rlocator = &locators[j];
					break;
				}
			}
		}
		else
		{
			RelFileLocator locator;

			locator = BufTagGetRelFileLocator(&bufHdr->tag);
			rlocator = bsearch(&locator,
							   locators, n, sizeof(RelFileLocator),
							   rlocator_comparator);
		}

		/* buffer doesn't belong to any of the given relfilelocators; skip it */
		if (rlocator == NULL)
			continue;

		/* Recheck under the buffer header lock before invalidating. */
		LockBufHdr(bufHdr);
		if (BufTagMatchesRelFileLocator(&bufHdr->tag, rlocator))
			InvalidateBuffer(bufHdr);	/* releases spinlock */
		else
			UnlockBufHdr(bufHdr);
	}

	pfree(locators);
	pfree(rels);
}
4959 :
4960 : /* ---------------------------------------------------------------------
4961 : * FindAndDropRelationBuffers
4962 : *
4963 : * This function performs look up in BufMapping table and removes from the
4964 : * buffer pool all the pages of the specified relation fork that has block
4965 : * number >= firstDelBlock. (In particular, with firstDelBlock = 0, all
4966 : * pages are removed.)
4967 : * --------------------------------------------------------------------
4968 : */
static void
FindAndDropRelationBuffers(RelFileLocator rlocator, ForkNumber forkNum,
						   BlockNumber nForkBlock,
						   BlockNumber firstDelBlock)
{
	BlockNumber curBlock;

	/* Probe the buffer mapping table once per block in the target range. */
	for (curBlock = firstDelBlock; curBlock < nForkBlock; curBlock++)
	{
		uint32		bufHash;	/* hash value for tag */
		BufferTag	bufTag;		/* identity of requested block */
		LWLock	   *bufPartitionLock;	/* buffer partition lock for it */
		int			buf_id;
		BufferDesc *bufHdr;

		/* create a tag so we can lookup the buffer */
		InitBufferTag(&bufTag, &rlocator, forkNum, curBlock);

		/* determine its hash code and partition lock ID */
		bufHash = BufTableHashCode(&bufTag);
		bufPartitionLock = BufMappingPartitionLock(bufHash);

		/* Check that it is in the buffer pool. If not, do nothing. */
		LWLockAcquire(bufPartitionLock, LW_SHARED);
		buf_id = BufTableLookup(&bufTag, bufHash);
		LWLockRelease(bufPartitionLock);

		if (buf_id < 0)
			continue;

		bufHdr = GetBufferDescriptor(buf_id);

		/*
		 * We need to lock the buffer header and recheck if the buffer is
		 * still associated with the same block because the buffer could be
		 * evicted by some other backend loading blocks for a different
		 * relation after we release lock on the BufMapping table.
		 */
		LockBufHdr(bufHdr);

		if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
			BufTagGetForkNum(&bufHdr->tag) == forkNum &&
			bufHdr->tag.blockNum >= firstDelBlock)
			InvalidateBuffer(bufHdr);	/* releases spinlock */
		else
			UnlockBufHdr(bufHdr);
	}
}
5017 :
5018 : /* ---------------------------------------------------------------------
5019 : * DropDatabaseBuffers
5020 : *
5021 : * This function removes all the buffers in the buffer cache for a
5022 : * particular database. Dirty pages are simply dropped, without
5023 : * bothering to write them out first. This is used when we destroy a
5024 : * database, to avoid trying to flush data to disk when the directory
5025 : * tree no longer exists. Implementation is pretty similar to
5026 : * DropRelationBuffers() which is for destroying just one relation.
5027 : * --------------------------------------------------------------------
5028 : */
void
DropDatabaseBuffers(Oid dbid)
{
	int			i;

	/*
	 * We needn't consider local buffers, since by assumption the target
	 * database isn't our own.
	 */

	for (i = 0; i < NBuffers; i++)
	{
		BufferDesc *bufHdr = GetBufferDescriptor(i);

		/*
		 * As in DropRelationBuffers, an unlocked precheck should be safe and
		 * saves some cycles.
		 */
		if (bufHdr->tag.dbOid != dbid)
			continue;

		/* Recheck under the buffer header lock before invalidating. */
		LockBufHdr(bufHdr);
		if (bufHdr->tag.dbOid == dbid)
			InvalidateBuffer(bufHdr);	/* releases spinlock */
		else
			UnlockBufHdr(bufHdr);
	}
}
5057 :
5058 : /* ---------------------------------------------------------------------
5059 : * FlushRelationBuffers
5060 : *
5061 : * This function writes all dirty pages of a relation out to disk
5062 : * (or more accurately, out to kernel disk buffers), ensuring that the
5063 : * kernel has an up-to-date view of the relation.
5064 : *
5065 : * Generally, the caller should be holding AccessExclusiveLock on the
5066 : * target relation to ensure that no other backend is busy dirtying
5067 : * more blocks of the relation; the effects can't be expected to last
5068 : * after the lock is released.
5069 : *
5070 : * XXX currently it sequentially searches the buffer pool, should be
5071 : * changed to more clever ways of searching. This routine is not
5072 : * used in any performance-critical code paths, so it's not worth
5073 : * adding additional overhead to normal paths to make it go faster.
5074 : * --------------------------------------------------------------------
5075 : */
void
FlushRelationBuffers(Relation rel)
{
	int			i;
	BufferDesc *bufHdr;
	SMgrRelation srel = RelationGetSmgr(rel);

	/* Temp relations: walk the backend-local buffer array instead. */
	if (RelationUsesLocalBuffers(rel))
	{
		for (i = 0; i < NLocBuffer; i++)
		{
			uint64		buf_state;

			bufHdr = GetLocalBufferDescriptor(i);
			if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
				((buf_state = pg_atomic_read_u64(&bufHdr->state)) &
				 (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
			{
				ErrorContextCallback errcallback;

				/* Setup error traceback support for ereport() */
				errcallback.callback = local_buffer_write_error_callback;
				errcallback.arg = bufHdr;
				errcallback.previous = error_context_stack;
				error_context_stack = &errcallback;

				/* Make sure we can handle the pin */
				ReservePrivateRefCountEntry();
				ResourceOwnerEnlarge(CurrentResourceOwner);

				/*
				 * Pin/unpin mostly to make valgrind work, but it also seems
				 * like the right thing to do.
				 */
				PinLocalBuffer(bufHdr, false);


				FlushLocalBuffer(bufHdr, srel);

				UnpinLocalBuffer(BufferDescriptorGetBuffer(bufHdr));

				/* Pop the error context stack */
				error_context_stack = errcallback.previous;
			}
		}

		return;
	}

	/* Shared relation: scan the whole shared buffer pool. */
	for (i = 0; i < NBuffers; i++)
	{
		uint64		buf_state;

		bufHdr = GetBufferDescriptor(i);

		/*
		 * As in DropRelationBuffers, an unlocked precheck should be safe and
		 * saves some cycles.
		 */
		if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
			continue;

		/* Make sure we can handle the pin */
		ReservePrivateRefCountEntry();
		ResourceOwnerEnlarge(CurrentResourceOwner);

		/* Recheck under the header lock, then flush only valid+dirty pages. */
		buf_state = LockBufHdr(bufHdr);
		if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
			(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
		{
			PinBuffer_Locked(bufHdr);
			FlushUnlockedBuffer(bufHdr, srel, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
			UnpinBuffer(bufHdr);
		}
		else
			UnlockBufHdr(bufHdr);
	}
}
5154 :
/* ---------------------------------------------------------------------
 *		FlushRelationsAllBuffers
 *
 *		This function flushes out of the buffer pool all the pages of all
 *		forks of the specified smgr relations.  It's equivalent to calling
 *		FlushRelationBuffers once per relation.  The relations are assumed not
 *		to use local buffers.
 * --------------------------------------------------------------------
 */
void
FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
{
	int			i;
	SMgrSortArray *srels;
	bool		use_bsearch;

	if (nrels == 0)
		return;

	/* fill-in array for qsort */
	srels = palloc_array(SMgrSortArray, nrels);

	for (i = 0; i < nrels; i++)
	{
		Assert(!RelFileLocatorBackendIsTemp(smgrs[i]->smgr_rlocator));

		srels[i].rlocator = smgrs[i]->smgr_rlocator.locator;
		srels[i].srel = smgrs[i];
	}

	/*
	 * Save the bsearch overhead for low number of relations to sync. See
	 * DropRelationsAllBuffers for details.
	 */
	use_bsearch = nrels > RELS_BSEARCH_THRESHOLD;

	/* sort the list of SMgrRelations if necessary */
	if (use_bsearch)
		qsort(srels, nrels, sizeof(SMgrSortArray), rlocator_comparator);

	/* Single pass over the whole buffer pool, matching each buffer's tag */
	for (i = 0; i < NBuffers; i++)
	{
		SMgrSortArray *srelent = NULL;
		BufferDesc *bufHdr = GetBufferDescriptor(i);
		uint64		buf_state;

		/*
		 * As in DropRelationBuffers, an unlocked precheck should be safe and
		 * saves some cycles.
		 */

		if (!use_bsearch)
		{
			int			j;

			/* linear scan of the (small) relation array */
			for (j = 0; j < nrels; j++)
			{
				if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srels[j].rlocator))
				{
					srelent = &srels[j];
					break;
				}
			}
		}
		else
		{
			RelFileLocator rlocator;

			rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
			srelent = bsearch(&rlocator,
							  srels, nrels, sizeof(SMgrSortArray),
							  rlocator_comparator);
		}

		/* buffer doesn't belong to any of the given relfilelocators; skip it */
		if (srelent == NULL)
			continue;

		/* Make sure we can handle the pin */
		ReservePrivateRefCountEntry();
		ResourceOwnerEnlarge(CurrentResourceOwner);

		/*
		 * Re-check the tag while holding the buffer header lock: the
		 * precheck above was unlocked, so the buffer could have been
		 * recycled for another relation in the meantime.  Only flush
		 * buffers that are both valid and dirty.
		 */
		buf_state = LockBufHdr(bufHdr);
		if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srelent->rlocator) &&
			(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
		{
			PinBuffer_Locked(bufHdr);
			FlushUnlockedBuffer(bufHdr, srelent->srel, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
			UnpinBuffer(bufHdr);
		}
		else
			UnlockBufHdr(bufHdr);
	}

	pfree(srels);
}
5251 :
/* ---------------------------------------------------------------------
 *		RelationCopyStorageUsingBuffer
 *
 *		Copy fork's data using bufmgr.  Same as RelationCopyStorage but instead
 *		of using smgrread and smgrextend this will copy using bufmgr APIs.
 *
 *		Refer comments atop CreateAndCopyRelationData() for details about
 *		'permanent' parameter.
 * --------------------------------------------------------------------
 */
static void
RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
							   RelFileLocator dstlocator,
							   ForkNumber forkNum, bool permanent)
{
	Buffer		srcBuf;
	Buffer		dstBuf;
	Page		srcPage;
	Page		dstPage;
	bool		use_wal;
	BlockNumber nblocks;
	BlockNumber blkno;
	PGIOAlignedBlock buf;
	BufferAccessStrategy bstrategy_src;
	BufferAccessStrategy bstrategy_dst;
	BlockRangeReadStreamPrivate p;
	ReadStream *src_stream;
	SMgrRelation src_smgr;

	/*
	 * In general, we want to write WAL whenever wal_level > 'minimal', but we
	 * can skip it when copying any fork of an unlogged relation other than
	 * the init fork.
	 */
	use_wal = XLogIsNeeded() && (permanent || forkNum == INIT_FORKNUM);

	/* Get number of blocks in the source relation. */
	nblocks = smgrnblocks(smgropen(srclocator, INVALID_PROC_NUMBER),
						  forkNum);

	/* Nothing to copy; just return. */
	if (nblocks == 0)
		return;

	/*
	 * Bulk extend the destination relation of the same size as the source
	 * relation before starting to copy block by block.
	 */
	memset(buf.data, 0, BLCKSZ);
	smgrextend(smgropen(dstlocator, INVALID_PROC_NUMBER), forkNum, nblocks - 1,
			   buf.data, true);

	/* This is a bulk operation, so use buffer access strategies. */
	bstrategy_src = GetAccessStrategy(BAS_BULKREAD);
	bstrategy_dst = GetAccessStrategy(BAS_BULKWRITE);

	/* Initialize streaming read */
	p.current_blocknum = 0;
	p.last_exclusive = nblocks;
	src_smgr = smgropen(srclocator, INVALID_PROC_NUMBER);

	/*
	 * It is safe to use batchmode as block_range_read_stream_cb takes no
	 * locks.
	 */
	src_stream = read_stream_begin_smgr_relation(READ_STREAM_FULL |
												 READ_STREAM_USE_BATCHING,
												 bstrategy_src,
												 src_smgr,
												 permanent ? RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED,
												 forkNum,
												 block_range_read_stream_cb,
												 &p,
												 0);

	/* Iterate over each block of the source relation file. */
	for (blkno = 0; blkno < nblocks; blkno++)
	{
		CHECK_FOR_INTERRUPTS();

		/* Read block from source relation. */
		srcBuf = read_stream_next_buffer(src_stream, NULL);
		LockBuffer(srcBuf, BUFFER_LOCK_SHARE);
		srcPage = BufferGetPage(srcBuf);

		/*
		 * RBM_ZERO_AND_LOCK returns the destination page already
		 * exclusively locked, so no separate LockBuffer() call is needed.
		 */
		dstBuf = ReadBufferWithoutRelcache(dstlocator, forkNum,
										   BufferGetBlockNumber(srcBuf),
										   RBM_ZERO_AND_LOCK, bstrategy_dst,
										   permanent);
		dstPage = BufferGetPage(dstBuf);

		/*
		 * The copy, dirtying, and WAL logging must be atomic with respect
		 * to errors, hence the critical section.
		 */
		START_CRIT_SECTION();

		/* Copy page data from the source to the destination. */
		memcpy(dstPage, srcPage, BLCKSZ);
		MarkBufferDirty(dstBuf);

		/* WAL-log the copied page. */
		if (use_wal)
			log_newpage_buffer(dstBuf, true);

		END_CRIT_SECTION();

		UnlockReleaseBuffer(dstBuf);
		UnlockReleaseBuffer(srcBuf);
	}
	Assert(read_stream_next_buffer(src_stream, NULL) == InvalidBuffer);
	read_stream_end(src_stream);

	FreeAccessStrategy(bstrategy_src);
	FreeAccessStrategy(bstrategy_dst);
}
5364 :
5365 : /* ---------------------------------------------------------------------
5366 : * CreateAndCopyRelationData
5367 : *
5368 : * Create destination relation storage and copy all forks from the
5369 : * source relation to the destination.
5370 : *
5371 : * Pass permanent as true for permanent relations and false for
5372 : * unlogged relations. Currently this API is not supported for
5373 : * temporary relations.
5374 : * --------------------------------------------------------------------
5375 : */
5376 : void
5377 58044 : CreateAndCopyRelationData(RelFileLocator src_rlocator,
5378 : RelFileLocator dst_rlocator, bool permanent)
5379 : {
5380 : char relpersistence;
5381 : SMgrRelation src_rel;
5382 : SMgrRelation dst_rel;
5383 :
5384 : /* Set the relpersistence. */
5385 58044 : relpersistence = permanent ?
5386 : RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED;
5387 :
5388 58044 : src_rel = smgropen(src_rlocator, INVALID_PROC_NUMBER);
5389 58044 : dst_rel = smgropen(dst_rlocator, INVALID_PROC_NUMBER);
5390 :
5391 : /*
5392 : * Create and copy all forks of the relation. During create database we
5393 : * have a separate cleanup mechanism which deletes complete database
5394 : * directory. Therefore, each individual relation doesn't need to be
5395 : * registered for cleanup.
5396 : */
5397 58044 : RelationCreateStorage(dst_rlocator, relpersistence, false);
5398 :
5399 : /* copy main fork. */
5400 58044 : RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, MAIN_FORKNUM,
5401 : permanent);
5402 :
5403 : /* copy those extra forks that exist */
5404 58042 : for (ForkNumber forkNum = MAIN_FORKNUM + 1;
5405 232168 : forkNum <= MAX_FORKNUM; forkNum++)
5406 : {
5407 174126 : if (smgrexists(src_rel, forkNum))
5408 : {
5409 19170 : smgrcreate(dst_rel, forkNum, false);
5410 :
5411 : /*
5412 : * WAL log creation if the relation is persistent, or this is the
5413 : * init fork of an unlogged relation.
5414 : */
5415 19170 : if (permanent || forkNum == INIT_FORKNUM)
5416 19170 : log_smgrcreate(&dst_rlocator, forkNum);
5417 :
5418 : /* Copy a fork's data, block by block. */
5419 19170 : RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, forkNum,
5420 : permanent);
5421 : }
5422 : }
5423 58042 : }
5424 :
5425 : /* ---------------------------------------------------------------------
5426 : * FlushDatabaseBuffers
5427 : *
5428 : * This function writes all dirty pages of a database out to disk
5429 : * (or more accurately, out to kernel disk buffers), ensuring that the
5430 : * kernel has an up-to-date view of the database.
5431 : *
5432 : * Generally, the caller should be holding an appropriate lock to ensure
5433 : * no other backend is active in the target database; otherwise more
5434 : * pages could get dirtied.
5435 : *
5436 : * Note we don't worry about flushing any pages of temporary relations.
5437 : * It's assumed these wouldn't be interesting.
5438 : * --------------------------------------------------------------------
5439 : */
5440 : void
5441 5 : FlushDatabaseBuffers(Oid dbid)
5442 : {
5443 : int i;
5444 : BufferDesc *bufHdr;
5445 :
5446 645 : for (i = 0; i < NBuffers; i++)
5447 : {
5448 : uint64 buf_state;
5449 :
5450 640 : bufHdr = GetBufferDescriptor(i);
5451 :
5452 : /*
5453 : * As in DropRelationBuffers, an unlocked precheck should be safe and
5454 : * saves some cycles.
5455 : */
5456 640 : if (bufHdr->tag.dbOid != dbid)
5457 471 : continue;
5458 :
5459 : /* Make sure we can handle the pin */
5460 169 : ReservePrivateRefCountEntry();
5461 169 : ResourceOwnerEnlarge(CurrentResourceOwner);
5462 :
5463 169 : buf_state = LockBufHdr(bufHdr);
5464 169 : if (bufHdr->tag.dbOid == dbid &&
5465 169 : (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
5466 : {
5467 34 : PinBuffer_Locked(bufHdr);
5468 34 : FlushUnlockedBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
5469 34 : UnpinBuffer(bufHdr);
5470 : }
5471 : else
5472 135 : UnlockBufHdr(bufHdr);
5473 : }
5474 5 : }
5475 :
5476 : /*
5477 : * Flush a previously, share-exclusively or exclusively, locked and pinned
5478 : * buffer to the OS.
5479 : */
5480 : void
5481 79 : FlushOneBuffer(Buffer buffer)
5482 : {
5483 : BufferDesc *bufHdr;
5484 :
5485 : /* currently not needed, but no fundamental reason not to support */
5486 : Assert(!BufferIsLocal(buffer));
5487 :
5488 : Assert(BufferIsPinned(buffer));
5489 :
5490 79 : bufHdr = GetBufferDescriptor(buffer - 1);
5491 :
5492 : Assert(BufferIsLockedByMe(buffer));
5493 :
5494 79 : FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
5495 79 : }
5496 :
5497 : /*
5498 : * ReleaseBuffer -- release the pin on a buffer
5499 : */
5500 : void
5501 64874960 : ReleaseBuffer(Buffer buffer)
5502 : {
5503 64874960 : if (!BufferIsValid(buffer))
5504 0 : elog(ERROR, "bad buffer ID: %d", buffer);
5505 :
5506 64874960 : if (BufferIsLocal(buffer))
5507 1613980 : UnpinLocalBuffer(buffer);
5508 : else
5509 63260980 : UnpinBuffer(GetBufferDescriptor(buffer - 1));
5510 64874960 : }
5511 :
5512 : /*
5513 : * UnlockReleaseBuffer -- release the content lock and pin on a buffer
5514 : *
5515 : * This is just a shorthand for a common combination.
5516 : */
5517 : void
5518 19228819 : UnlockReleaseBuffer(Buffer buffer)
5519 : {
5520 19228819 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5521 19228819 : ReleaseBuffer(buffer);
5522 19228819 : }
5523 :
5524 : /*
5525 : * IncrBufferRefCount
5526 : * Increment the pin count on a buffer that we have *already* pinned
5527 : * at least once.
5528 : *
5529 : * This function cannot be used on a buffer we do not have pinned,
5530 : * because it doesn't change the shared buffer state.
5531 : */
5532 : void
5533 12259377 : IncrBufferRefCount(Buffer buffer)
5534 : {
5535 : Assert(BufferIsPinned(buffer));
5536 12259377 : ResourceOwnerEnlarge(CurrentResourceOwner);
5537 12259377 : if (BufferIsLocal(buffer))
5538 354952 : LocalRefCount[-buffer - 1]++;
5539 : else
5540 : {
5541 : PrivateRefCountEntry *ref;
5542 :
5543 11904425 : ref = GetPrivateRefCountEntry(buffer, true);
5544 : Assert(ref != NULL);
5545 11904425 : ref->data.refcount++;
5546 : }
5547 12259377 : ResourceOwnerRememberBuffer(CurrentResourceOwner, buffer);
5548 12259377 : }
5549 :
/*
 * Shared-buffer only helper for MarkBufferDirtyHint() and
 * BufferSetHintBits16().
 *
 * This is separated out because it turns out that the repeated checks for
 * local buffers, repeated GetBufferDescriptor() and repeated reading of the
 * buffer's state sufficiently hurts the performance of BufferSetHintBits16().
 *
 * 'lockstate' is the buffer state as read by the caller while holding the
 * content lock; see the comment below about why it can be trusted here.
 */
static inline void
MarkSharedBufferDirtyHint(Buffer buffer, BufferDesc *bufHdr, uint64 lockstate,
						  bool buffer_std)
{
	Page		page = BufferGetPage(buffer);

	Assert(GetPrivateRefCount(buffer) > 0);

	/* here, either share-exclusive or exclusive lock is OK */
	Assert(BufferLockHeldByMeInMode(bufHdr, BUFFER_LOCK_EXCLUSIVE) ||
		   BufferLockHeldByMeInMode(bufHdr, BUFFER_LOCK_SHARE_EXCLUSIVE));

	/*
	 * This routine might get called many times on the same page, if we are
	 * making the first scan after commit of an xact that added/deleted many
	 * tuples. So, be as quick as we can if the buffer is already dirty.
	 *
	 * As we are holding (at least) a share-exclusive lock, nobody could have
	 * cleaned or dirtied the page concurrently, so we can just rely on the
	 * previously fetched value here without any danger of races.
	 */
	if (unlikely((lockstate & (BM_DIRTY | BM_JUST_DIRTIED)) !=
				 (BM_DIRTY | BM_JUST_DIRTIED)))
	{
		XLogRecPtr	lsn = InvalidXLogRecPtr;
		bool		delayChkptFlags = false;
		uint64		buf_state;

		/*
		 * If we need to protect hint bit updates from torn writes, WAL-log a
		 * full page image of the page. This full page image is only necessary
		 * if the hint bit update is the first change to the page since the
		 * last checkpoint.
		 *
		 * We don't check full_page_writes here because that logic is included
		 * when we call XLogInsert() since the value changes dynamically.
		 */
		if (XLogHintBitIsNeeded() && (lockstate & BM_PERMANENT))
		{
			/*
			 * If we must not write WAL, due to a relfilelocator-specific
			 * condition or being in recovery, don't dirty the page. We can
			 * set the hint, just not dirty the page as a result so the hint
			 * is lost when we evict the page or shutdown.
			 *
			 * See src/backend/storage/page/README for longer discussion.
			 */
			if (RecoveryInProgress() ||
				RelFileLocatorSkippingWAL(BufTagGetRelFileLocator(&bufHdr->tag)))
				return;

			/*
			 * If the block is already dirty because we either made a change
			 * or set a hint already, then we don't need to write a full page
			 * image. Note that aggressive cleaning of blocks dirtied by hint
			 * bit setting would increase the call rate. Bulk setting of hint
			 * bits would reduce the call rate...
			 *
			 * We must issue the WAL record before we mark the buffer dirty.
			 * Otherwise we might write the page before we write the WAL. That
			 * causes a race condition, since a checkpoint might occur between
			 * writing the WAL record and marking the buffer dirty. We solve
			 * that with a kluge, but one that is already in use during
			 * transaction commit to prevent race conditions. Basically, we
			 * simply prevent the checkpoint WAL record from being written
			 * until we have marked the buffer dirty. We don't start the
			 * checkpoint flush until we have marked dirty, so our checkpoint
			 * must flush the change to disk successfully or the checkpoint
			 * never gets written, so crash recovery will fix.
			 *
			 * It's possible we may enter here without an xid, so it is
			 * essential that CreateCheckPoint waits for virtual transactions
			 * rather than full transactionids.
			 */
			Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
			MyProc->delayChkptFlags |= DELAY_CHKPT_START;
			delayChkptFlags = true;
			lsn = XLogSaveBufferForHint(buffer, buffer_std);
		}

		buf_state = LockBufHdr(bufHdr);

		/*
		 * It should not be possible for the buffer to already be dirty, see
		 * comment above.
		 */
		Assert(!(buf_state & BM_DIRTY));
		Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);

		if (XLogRecPtrIsValid(lsn))
		{
			/*
			 * Set the page LSN if we wrote a backup block. To allow backends
			 * that only hold a share lock on the buffer to read the LSN in a
			 * tear-free manner, we set the page LSN while holding the buffer
			 * header lock. This allows any reader of an LSN who holds only a
			 * share lock to also obtain a buffer header lock before using
			 * PageGetLSN() to read the LSN in a tear free way. This is done
			 * in BufferGetLSNAtomic().
			 *
			 * If checksums are enabled, you might think we should reset the
			 * checksum here. That will happen when the page is written
			 * sometime later in this checkpoint cycle.
			 */
			PageSetLSN(page, lsn);
		}

		/* Release the header lock, atomically setting the dirty flags. */
		UnlockBufHdrExt(bufHdr, buf_state,
						BM_DIRTY | BM_JUST_DIRTIED,
						0, 0);

		/* Safe to let the checkpointer proceed again. */
		if (delayChkptFlags)
			MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;

		pgBufferUsage.shared_blks_dirtied++;
		if (VacuumCostActive)
			VacuumCostBalance += VacuumCostPageDirty;
	}
}
5677 :
5678 : /*
5679 : * MarkBufferDirtyHint
5680 : *
5681 : * Mark a buffer dirty for non-critical changes.
5682 : *
5683 : * This is essentially the same as MarkBufferDirty, except:
5684 : *
5685 : * 1. The caller does not write WAL; so if checksums are enabled, we may need
5686 : * to write an XLOG_FPI_FOR_HINT WAL record to protect against torn pages.
5687 : * 2. The caller might have only a share-exclusive-lock instead of an
5688 : * exclusive-lock on the buffer's content lock.
5689 : * 3. This function does not guarantee that the buffer is always marked dirty
5690 : * (it e.g. can't always on a hot standby), so it cannot be used for
5691 : * important changes.
5692 : */
5693 : inline void
5694 636949 : MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
5695 : {
5696 : BufferDesc *bufHdr;
5697 :
5698 636949 : bufHdr = GetBufferDescriptor(buffer - 1);
5699 :
5700 636949 : if (!BufferIsValid(buffer))
5701 0 : elog(ERROR, "bad buffer ID: %d", buffer);
5702 :
5703 636949 : if (BufferIsLocal(buffer))
5704 : {
5705 13268 : MarkLocalBufferDirty(buffer);
5706 13268 : return;
5707 : }
5708 :
5709 623681 : MarkSharedBufferDirtyHint(buffer, bufHdr,
5710 623681 : pg_atomic_read_u64(&bufHdr->state),
5711 : buffer_std);
5712 : }
5713 :
5714 : /*
5715 : * Release buffer content locks for shared buffers.
5716 : *
5717 : * Used to clean up after errors.
5718 : *
5719 : * Currently, we can expect that resource owner cleanup, via
5720 : * ResOwnerReleaseBufferPin(), took care of releasing buffer content locks per
5721 : * se; the only thing we need to deal with here is clearing any PIN_COUNT
5722 : * request that was in progress.
5723 : */
5724 : void
5725 54709 : UnlockBuffers(void)
5726 : {
5727 54709 : BufferDesc *buf = PinCountWaitBuf;
5728 :
5729 54709 : if (buf)
5730 : {
5731 : uint64 buf_state;
5732 0 : uint64 unset_bits = 0;
5733 :
5734 0 : buf_state = LockBufHdr(buf);
5735 :
5736 : /*
5737 : * Don't complain if flag bit not set; it could have been reset but we
5738 : * got a cancel/die interrupt before getting the signal.
5739 : */
5740 0 : if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
5741 0 : buf->wait_backend_pgprocno == MyProcNumber)
5742 0 : unset_bits = BM_PIN_COUNT_WAITER;
5743 :
5744 0 : UnlockBufHdrExt(buf, buf_state,
5745 : 0, unset_bits,
5746 : 0);
5747 :
5748 0 : PinCountWaitBuf = NULL;
5749 : }
5750 54709 : }
5751 :
/*
 * Acquire the buffer content lock in the specified mode
 *
 * If the lock is not available, sleep until it is.
 *
 * Side effect: cancel/die interrupts are held off until lock release.
 *
 * This uses almost the same locking approach as lwlock.c's
 * LWLockAcquire(). See documentation at the top of lwlock.c for a more
 * detailed discussion.
 *
 * The reason that this, and most of the other BufferLock* functions, get both
 * the Buffer and BufferDesc* as parameters, is that looking up one from the
 * other repeatedly shows up noticeably in profiles.
 *
 * Callers should provide a constant for mode, for more efficient code
 * generation.
 */
static inline void
BufferLockAcquire(Buffer buffer, BufferDesc *buf_hdr, BufferLockMode mode)
{
	PrivateRefCountEntry *entry;
	int			extraWaits = 0;

	/*
	 * Get reference to the refcount entry before we hold the lock, it seems
	 * better to do before holding the lock.
	 */
	entry = GetPrivateRefCountEntry(buffer, true);

	/*
	 * We better not already hold a lock on the buffer.
	 */
	Assert(entry->data.lockmode == BUFFER_LOCK_UNLOCK);

	/*
	 * Lock out cancel/die interrupts until we exit the code section protected
	 * by the content lock. This ensures that interrupts will not interfere
	 * with manipulations of data structures in shared memory.
	 */
	HOLD_INTERRUPTS();

	for (;;)
	{
		uint32		wait_event = 0; /* initialized to avoid compiler warning */
		bool		mustwait;

		/*
		 * Try to grab the lock the first time, we're not in the waitqueue
		 * yet/anymore.
		 */
		mustwait = BufferLockAttempt(buf_hdr, mode);

		if (likely(!mustwait))
		{
			break;
		}

		/*
		 * Ok, at this point we couldn't grab the lock on the first try. We
		 * cannot simply queue ourselves to the end of the list and wait to be
		 * woken up because by now the lock could long have been released.
		 * Instead add us to the queue and try to grab the lock again. If we
		 * succeed we need to revert the queuing and be happy, otherwise we
		 * recheck the lock. If we still couldn't grab it, we know that the
		 * other locker will see our queue entries when releasing since they
		 * existed before we checked for the lock.
		 */

		/* add to the queue */
		BufferLockQueueSelf(buf_hdr, mode);

		/* we're now guaranteed to be woken up if necessary */
		mustwait = BufferLockAttempt(buf_hdr, mode);

		/* ok, grabbed the lock the second time round, need to undo queueing */
		if (!mustwait)
		{
			BufferLockDequeueSelf(buf_hdr);
			break;
		}

		/* Map the requested lock mode to the wait event we report. */
		switch (mode)
		{
			case BUFFER_LOCK_EXCLUSIVE:
				wait_event = WAIT_EVENT_BUFFER_EXCLUSIVE;
				break;
			case BUFFER_LOCK_SHARE_EXCLUSIVE:
				wait_event = WAIT_EVENT_BUFFER_SHARE_EXCLUSIVE;
				break;
			case BUFFER_LOCK_SHARE:
				wait_event = WAIT_EVENT_BUFFER_SHARED;
				break;
			case BUFFER_LOCK_UNLOCK:
				pg_unreachable();

		}
		pgstat_report_wait_start(wait_event);

		/*
		 * Wait until awakened.
		 *
		 * It is possible that we get awakened for a reason other than being
		 * signaled by BufferLockWakeup(). If so, loop back and wait again.
		 * Once we've gotten the lock, re-increment the sema by the number of
		 * additional signals received.
		 */
		for (;;)
		{
			PGSemaphoreLock(MyProc->sem);
			if (MyProc->lwWaiting == LW_WS_NOT_WAITING)
				break;
			extraWaits++;
		}

		pgstat_report_wait_end();

		/* Retrying, allow BufferLockRelease to release waiters again. */
		pg_atomic_fetch_and_u64(&buf_hdr->state, ~BM_LOCK_WAKE_IN_PROGRESS);
	}

	/* Remember that we now hold this lock */
	entry->data.lockmode = mode;

	/*
	 * Fix the process wait semaphore's count for any absorbed wakeups.
	 */
	while (unlikely(extraWaits-- > 0))
		PGSemaphoreUnlock(MyProc->sem);
}
5882 :
/*
 * Release a previously acquired buffer content lock.
 *
 * Clears our backend-local bookkeeping, atomically drops the lock bits from
 * the buffer state, and wakes any waiters that can now proceed.
 */
static void
BufferLockUnlock(Buffer buffer, BufferDesc *buf_hdr)
{
	BufferLockMode mode;
	uint64		oldstate;
	uint64		sub;

	/* Forget the lock locally; returns the mode we were holding. */
	mode = BufferLockDisownInternal(buffer, buf_hdr);

	/*
	 * Release my hold on lock, after that it can immediately be acquired by
	 * others, even if we still have to wakeup other waiters.
	 */
	sub = BufferLockReleaseSub(mode);

	oldstate = pg_atomic_sub_fetch_u64(&buf_hdr->state, sub);

	/* Wake up waiters if the state we left behind requires it. */
	BufferLockProcessRelease(buf_hdr, mode, oldstate);

	/*
	 * Now okay to allow cancel/die interrupts.
	 */
	RESUME_INTERRUPTS();
}
5910 :
5911 :
5912 : /*
5913 : * Acquire the content lock for the buffer, but only if we don't have to wait.
5914 : *
5915 : * It is allowed to try to conditionally acquire a lock on a buffer that this
5916 : * backend has already locked, but the lock acquisition will always fail, even
5917 : * if the new lock acquisition does not conflict with an already held lock
5918 : * (e.g. two share locks). This is because we currently do not have space to
5919 : * track multiple lock ownerships of the same buffer within one backend. That
5920 : * is ok for the current uses of BufferLockConditional().
5921 : */
5922 : static bool
5923 1737818 : BufferLockConditional(Buffer buffer, BufferDesc *buf_hdr, BufferLockMode mode)
5924 : {
5925 1737818 : PrivateRefCountEntry *entry = GetPrivateRefCountEntry(buffer, true);
5926 : bool mustwait;
5927 :
5928 : /*
5929 : * As described above, if we're trying to lock a buffer this backend
5930 : * already has locked, return false, independent of the existing and
5931 : * desired lock level.
5932 : */
5933 1737818 : if (entry->data.lockmode != BUFFER_LOCK_UNLOCK)
5934 0 : return false;
5935 :
5936 : /*
5937 : * Lock out cancel/die interrupts until we exit the code section protected
5938 : * by the content lock. This ensures that interrupts will not interfere
5939 : * with manipulations of data structures in shared memory.
5940 : */
5941 1737818 : HOLD_INTERRUPTS();
5942 :
5943 : /* Check for the lock */
5944 1737818 : mustwait = BufferLockAttempt(buf_hdr, mode);
5945 :
5946 1737818 : if (mustwait)
5947 : {
5948 : /* Failed to get lock, so release interrupt holdoff */
5949 2150 : RESUME_INTERRUPTS();
5950 : }
5951 : else
5952 : {
5953 1735668 : entry->data.lockmode = mode;
5954 : }
5955 :
5956 1737818 : return !mustwait;
5957 : }
5958 :
/*
 * Internal function that tries to atomically acquire the content lock in the
 * passed in mode.
 *
 * Returns false if the lock was acquired, true if the caller must wait.
 *
 * This function will not block waiting for a lock to become free - that's the
 * caller's job.
 *
 * Similar to LWLockAttemptLock().
 */
static inline bool
BufferLockAttempt(BufferDesc *buf_hdr, BufferLockMode mode)
{
	uint64		old_state;

	/*
	 * Read once outside the loop, later iterations will get the newer value
	 * via compare & exchange.
	 */
	old_state = pg_atomic_read_u64(&buf_hdr->state);

	/* loop until we've determined whether we could acquire the lock or not */
	while (true)
	{
		uint64		desired_state;
		bool		lock_free;

		desired_state = old_state;

		/*
		 * Compute whether the lock is free for the requested mode, and what
		 * the state word would become if we took it.
		 */
		if (mode == BUFFER_LOCK_EXCLUSIVE)
		{
			lock_free = (old_state & BM_LOCK_MASK) == 0;
			if (lock_free)
				desired_state += BM_LOCK_VAL_EXCLUSIVE;
		}
		else if (mode == BUFFER_LOCK_SHARE_EXCLUSIVE)
		{
			lock_free = (old_state & (BM_LOCK_VAL_EXCLUSIVE | BM_LOCK_VAL_SHARE_EXCLUSIVE)) == 0;
			if (lock_free)
				desired_state += BM_LOCK_VAL_SHARE_EXCLUSIVE;
		}
		else
		{
			lock_free = (old_state & BM_LOCK_VAL_EXCLUSIVE) == 0;
			if (lock_free)
				desired_state += BM_LOCK_VAL_SHARED;
		}

		/*
		 * Attempt to swap in the state we are expecting. If we didn't see
		 * lock to be free, that's just the old value. If we saw it as free,
		 * we'll attempt to mark it acquired. The reason that we always swap
		 * in the value is that this doubles as a memory barrier. We could try
		 * to be smarter and only swap in values if we saw the lock as free,
		 * but benchmark haven't shown it as beneficial so far.
		 *
		 * Retry if the value changed since we last looked at it.
		 */
		if (likely(pg_atomic_compare_exchange_u64(&buf_hdr->state,
												  &old_state, desired_state)))
		{
			if (lock_free)
			{
				/* Great! Got the lock. */
				return false;
			}
			else
				return true;	/* somebody else has the lock */
		}
	}

	pg_unreachable();
}
6031 :
/*
 * Add ourselves to the end of the content lock's wait queue.
 *
 * Both the queue and the BM_LOCK_HAS_WAITERS flag are manipulated while
 * holding the buffer header spinlock.
 */
static void
BufferLockQueueSelf(BufferDesc *buf_hdr, BufferLockMode mode)
{
	/*
	 * If we don't have a PGPROC structure, there's no way to wait. This
	 * should never occur, since MyProc should only be null during shared
	 * memory initialization.
	 */
	if (MyProc == NULL)
		elog(PANIC, "cannot wait without a PGPROC structure");

	if (MyProc->lwWaiting != LW_WS_NOT_WAITING)
		elog(PANIC, "queueing for lock while waiting on another one");

	LockBufHdr(buf_hdr);

	/* setting the flag is protected by the spinlock */
	pg_atomic_fetch_or_u64(&buf_hdr->state, BM_LOCK_HAS_WAITERS);

	/*
	 * These are currently used both for lwlocks and buffer content locks,
	 * which is acceptable, although not pretty, because a backend can't wait
	 * for both types of locks at the same time.
	 */
	MyProc->lwWaiting = LW_WS_WAITING;
	MyProc->lwWaitMode = mode;

	proclist_push_tail(&buf_hdr->lock_waiters, MyProcNumber, lwWaitLink);

	/* Can release the mutex now */
	UnlockBufHdr(buf_hdr);
}
6067 :
/*
 * Remove ourselves from the waitlist.
 *
 * This is used if we queued ourselves because we thought we needed to sleep
 * but, after further checking, we discovered that we don't actually need to
 * do so.
 *
 * If another backend already dequeued us, we must absorb the wakeup it has
 * scheduled for us before returning; see the second branch below.
 */
static void
BufferLockDequeueSelf(BufferDesc *buf_hdr)
{
	bool		on_waitlist;

	LockBufHdr(buf_hdr);

	/* We are still queued only if nobody has dequeued us yet. */
	on_waitlist = MyProc->lwWaiting == LW_WS_WAITING;
	if (on_waitlist)
		proclist_delete(&buf_hdr->lock_waiters, MyProcNumber, lwWaitLink);

	/* Clear the waiters flag if the queue has become empty. */
	if (proclist_is_empty(&buf_hdr->lock_waiters) &&
		(pg_atomic_read_u64(&buf_hdr->state) & BM_LOCK_HAS_WAITERS) != 0)
	{
		pg_atomic_fetch_and_u64(&buf_hdr->state, ~BM_LOCK_HAS_WAITERS);
	}

	/* XXX: combine with fetch_and above? */
	UnlockBufHdr(buf_hdr);

	/* clear waiting state again, nice for debugging */
	if (on_waitlist)
		MyProc->lwWaiting = LW_WS_NOT_WAITING;
	else
	{
		int			extraWaits = 0;


		/*
		 * Somebody else dequeued us and has or will wake us up. Deal with the
		 * superfluous absorption of a wakeup.
		 */

		/*
		 * Clear BM_LOCK_WAKE_IN_PROGRESS if somebody woke us before we
		 * removed ourselves - they'll have set it.
		 */
		pg_atomic_fetch_and_u64(&buf_hdr->state, ~BM_LOCK_WAKE_IN_PROGRESS);

		/*
		 * Now wait for the scheduled wakeup, otherwise our ->lwWaiting would
		 * get reset at some inconvenient point later. Most of the time this
		 * will immediately return.
		 */
		for (;;)
		{
			PGSemaphoreLock(MyProc->sem);
			if (MyProc->lwWaiting == LW_WS_NOT_WAITING)
				break;
			extraWaits++;
		}

		/*
		 * Fix the process wait semaphore's count for any absorbed wakeups.
		 */
		while (extraWaits-- > 0)
			PGSemaphoreUnlock(MyProc->sem);
	}
}
6134 :
6135 : /*
6136 : * Stop treating lock as held by current backend.
6137 : *
6138 : * After calling this function it's the caller's responsibility to ensure that
6139 : * the lock gets released, even in case of an error. This is only desirable if
6140 : * the lock is going to be released in a different process than the process
6141 : * that acquired it.
6142 : */
6143 : static inline void
6144 0 : BufferLockDisown(Buffer buffer, BufferDesc *buf_hdr)
6145 : {
6146 0 : BufferLockDisownInternal(buffer, buf_hdr);
6147 0 : RESUME_INTERRUPTS(); /* NOTE(review): presumably pairs with a HOLD_INTERRUPTS in the acquire path (not visible in this chunk) -- confirm */
6148 0 : }
6149 :
6150 : /*
6151 : * Stop treating lock as held by current backend.
6152 : *
6153 : * This is the code that can be shared between actually releasing a lock
6154 : * (BufferLockUnlock()) and just not tracking ownership of the lock anymore
6155 : * without releasing the lock (BufferLockDisown()).
6156 : */
6157 : static inline int
6158 85081453 : BufferLockDisownInternal(Buffer buffer, BufferDesc *buf_hdr)
6159 : {
6160 : BufferLockMode mode;
6161 : PrivateRefCountEntry *ref;
6162 :
6163 85081453 : ref = GetPrivateRefCountEntry(buffer, false); /* lock ownership is tracked in the private refcount entry */
6164 85081453 : if (ref == NULL)
6165 0 : elog(ERROR, "lock %d is not held", buffer);
6166 85081453 : mode = ref->data.lockmode;
6167 85081453 : ref->data.lockmode = BUFFER_LOCK_UNLOCK; /* stop tracking the lock as held by us */
6168 :
6169 85081453 : return mode; /* the BufferLockMode that was held, for the caller to release */
6170 : }
6171 :
6172 : /*
6173 : * Wakeup all the lockers that currently have a chance to acquire the lock.
6174 : *
6175 : * wake_exclusive indicates whether exclusive lock waiters should be woken up.
6176 : */
6177 : static void
6178 24322 : BufferLockWakeup(BufferDesc *buf_hdr, bool wake_exclusive)
6179 : {
6180 24322 : bool new_wake_in_progress = false;
6181 24322 : bool wake_share_exclusive = true;
6182 : proclist_head wakeup; /* procs to wake, collected here and signaled after the header lock is released */
6183 : proclist_mutable_iter iter;
6184 :
6185 24322 : proclist_init(&wakeup);
6186 :
6187 : /* lock wait list while collecting backends to wake up */
6188 24322 : LockBufHdr(buf_hdr);
6189 :
6190 37280 : proclist_foreach_modify(iter, &buf_hdr->lock_waiters, lwWaitLink)
6191 : {
6192 26176 : PGPROC *waiter = GetPGProcByNumber(iter.cur);
6193 :
6194 : /*
6195 : * Already woke up a conflicting lock, so skip over this wait list
6196 : * entry.
6197 : */
6198 26176 : if (!wake_exclusive && waiter->lwWaitMode == BUFFER_LOCK_EXCLUSIVE)
6199 766 : continue;
6200 25410 : if (!wake_share_exclusive && waiter->lwWaitMode == BUFFER_LOCK_SHARE_EXCLUSIVE)
6201 0 : continue;
6202 :
6203 25410 : proclist_delete(&buf_hdr->lock_waiters, iter.cur, lwWaitLink);
6204 25410 : proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
6205 :
6206 : /*
6207 : * Prevent additional wakeups until retryer gets to run. Backends that
6208 : * are just waiting for the lock to become free don't retry
6209 : * automatically.
6210 : */
6211 25410 : new_wake_in_progress = true;
6212 :
6213 : /*
6214 : * Signal that the process isn't on the wait list anymore. This allows
6215 : * BufferLockDequeueSelf() to remove itself from the waitlist with a
6216 : * proclist_delete(), rather than having to check if it has been
6217 : * removed from the list.
6218 : */
6219 : Assert(waiter->lwWaiting == LW_WS_WAITING);
6220 25410 : waiter->lwWaiting = LW_WS_PENDING_WAKEUP;
6221 :
6222 : /*
6223 : * Don't wakeup further waiters after waking a conflicting waiter.
6224 : */
6225 25410 : if (waiter->lwWaitMode == BUFFER_LOCK_SHARE)
6226 : {
6227 : /*
6228 : * Share locks conflict with exclusive locks.
6229 : */
6230 12084 : wake_exclusive = false;
6231 : }
6232 13326 : else if (waiter->lwWaitMode == BUFFER_LOCK_SHARE_EXCLUSIVE)
6233 : {
6234 : /*
6235 : * Share-exclusive locks conflict with share-exclusive and
6236 : * exclusive locks.
6237 : */
6238 108 : wake_exclusive = false;
6239 108 : wake_share_exclusive = false;
6240 : }
6241 13218 : else if (waiter->lwWaitMode == BUFFER_LOCK_EXCLUSIVE)
6242 : {
6243 : /*
6244 : * Exclusive locks conflict with all other locks, there's no point
6245 : * in waking up anybody else.
6246 : */
6247 13218 : break;
6248 : }
6249 : }
6250 :
6251 : Assert(proclist_is_empty(&wakeup) || pg_atomic_read_u64(&buf_hdr->state) & BM_LOCK_HAS_WAITERS);
6252 :
6253 : /* unset required flags, and release lock, in one fell swoop */
6254 : {
6255 : uint64 old_state;
6256 : uint64 desired_state;
6257 :
6258 24322 : old_state = pg_atomic_read_u64(&buf_hdr->state); /* CAS loop: retried on concurrent state changes */
6259 : while (true)
6260 : {
6261 24381 : desired_state = old_state;
6262 :
6263 : /* compute desired flags */
6264 :
6265 24381 : if (new_wake_in_progress)
6266 24014 : desired_state |= BM_LOCK_WAKE_IN_PROGRESS;
6267 : else
6268 367 : desired_state &= ~BM_LOCK_WAKE_IN_PROGRESS;
6269 :
6270 24381 : if (proclist_is_empty(&buf_hdr->lock_waiters))
6271 21967 : desired_state &= ~BM_LOCK_HAS_WAITERS;
6272 :
6273 24381 : desired_state &= ~BM_LOCKED; /* release lock */
6274 :
6275 24381 : if (pg_atomic_compare_exchange_u64(&buf_hdr->state, &old_state,
6276 : desired_state))
6277 24322 : break;
6278 : }
6279 : }
6280 :
6281 : /* Awaken any waiters I removed from the queue. */
6282 49732 : proclist_foreach_modify(iter, &wakeup, lwWaitLink)
6283 : {
6284 25410 : PGPROC *waiter = GetPGProcByNumber(iter.cur);
6285 :
6286 25410 : proclist_delete(&wakeup, iter.cur, lwWaitLink);
6287 :
6288 : /*
6289 : * Guarantee that lwWaiting being unset only becomes visible once the
6290 : * unlink from the link has completed. Otherwise the target backend
6291 : * could be woken up for other reason and enqueue for a new lock - if
6292 : * that happens before the list unlink happens, the list would end up
6293 : * being corrupted.
6294 : *
6295 : * The barrier pairs with the LockBufHdr() when enqueuing for another
6296 : * lock.
6297 : */
6298 25410 : pg_write_barrier();
6299 25410 : waiter->lwWaiting = LW_WS_NOT_WAITING;
6300 25410 : PGSemaphoreUnlock(waiter->sem); /* actually wake the backend */
6301 : }
6302 24322 : }
6303 :
6304 : /*
6305 : * Compute subtraction from buffer state for a release of a held lock in
6306 : * `mode`.
6307 : *
6308 : * This is separated from BufferLockUnlock() as we want to combine the lock
6309 : * release with other atomic operations when possible, leading to the lock
6310 : * release being done in multiple places, each needing to compute what to
6311 : * subtract from the lock state.
6312 : */
6313 : static inline uint64
6314 85081453 : BufferLockReleaseSub(BufferLockMode mode)
6315 : {
6316 : /*
6317 : * Turns out that a switch() leads gcc to generate sufficiently worse code
6318 : * for this to show up in profiles...
6319 : */
6320 85081453 : if (mode == BUFFER_LOCK_EXCLUSIVE)
6321 25974635 : return BM_LOCK_VAL_EXCLUSIVE;
6322 59106818 : else if (mode == BUFFER_LOCK_SHARE_EXCLUSIVE)
6323 3357411 : return BM_LOCK_VAL_SHARE_EXCLUSIVE;
6324 : else
6325 : {
6326 : Assert(mode == BUFFER_LOCK_SHARE);
6327 55749407 : return BM_LOCK_VAL_SHARED;
6328 : }
6329 :
6330 : return 0; /* unreachable; keep compiler quiet */
6331 : }
6332 :
6333 : /*
6334 : * Handle work that needs to be done after releasing a lock that was held in
6335 : * `mode`, where `lockstate` is the result of the atomic operation modifying
6336 : * the state variable.
6337 : *
6338 : * This is separated from BufferLockUnlock() as we want to combine the lock
6339 : * release with other atomic operations when possible, leading to the lock
6340 : * release being done in multiple places.
6341 : */
6342 : static void
6343 85081453 : BufferLockProcessRelease(BufferDesc *buf_hdr, BufferLockMode mode, uint64 lockstate)
6344 : {
6345 85081453 : bool check_waiters = false;
6346 85081453 : bool wake_exclusive = false;
6347 :
6348 : /* nobody else can have that kind of lock */
6349 : Assert(!(lockstate & BM_LOCK_VAL_EXCLUSIVE));
6350 :
6351 : /*
6352 : * If we're still waiting for backends to get scheduled, don't wake them
6353 : * up again. Otherwise check if we need to look through the waitqueue to
6354 : * wake other backends.
6355 : */
6356 85081453 : if ((lockstate & BM_LOCK_HAS_WAITERS) &&
6357 81338 : !(lockstate & BM_LOCK_WAKE_IN_PROGRESS)) /* a prior wakeup is still being absorbed */
6358 : {
6359 38774 : if ((lockstate & BM_LOCK_MASK) == 0) /* no lock of any mode remains held */
6360 : {
6361 : /*
6362 : * We released a lock and the lock was, in that moment, free. We
6363 : * therefore can wake waiters for any kind of lock.
6364 : */
6365 24321 : check_waiters = true;
6366 24321 : wake_exclusive = true;
6367 : }
6368 14453 : else if (mode == BUFFER_LOCK_SHARE_EXCLUSIVE)
6369 : {
6370 : /*
6371 : * We released the lock, but another backend still holds a lock.
6372 : * We can't have released an exclusive lock, as there couldn't
6373 : * have been other lock holders. If we released a share lock, no
6374 : * waiters need to be woken up, as there must be other share
6375 : * lockers. However, if we held a share-exclusive lock, another
6376 : * backend now could acquire a share-exclusive lock.
6377 : */
6378 1 : check_waiters = true;
6379 1 : wake_exclusive = false;
6380 : }
6381 : }
6382 :
6383 : /*
6384 : * As waking up waiters requires the spinlock to be acquired, only do so
6385 : * if necessary.
6386 : */
6387 85081453 : if (check_waiters)
6388 24322 : BufferLockWakeup(buf_hdr, wake_exclusive);
6389 85081453 : }
6390 :
6391 : /*
6392 : * BufferLockHeldByMeInMode - test whether my process holds the content lock
6393 : * in the specified mode
6394 : *
6395 : * This is meant as debug support only.
6396 : */
6397 : static bool
6398 0 : BufferLockHeldByMeInMode(BufferDesc *buf_hdr, BufferLockMode mode)
6399 : {
6400 : PrivateRefCountEntry *entry =
6401 0 : GetPrivateRefCountEntry(BufferDescriptorGetBuffer(buf_hdr), false);
6402 :
6403 0 : if (!entry)
6404 0 : return false; /* no pin => we cannot hold the content lock */
6405 : else
6406 0 : return entry->data.lockmode == mode;
6407 : }
6408 :
6409 : /*
6410 : * BufferLockHeldByMe - test whether my process holds the content lock in any
6411 : * mode
6412 : *
6413 : * This is meant as debug support only.
6414 : */
6415 : static bool
6416 0 : BufferLockHeldByMe(BufferDesc *buf_hdr)
6417 : {
6418 : PrivateRefCountEntry *entry =
6419 0 : GetPrivateRefCountEntry(BufferDescriptorGetBuffer(buf_hdr), false);
6420 :
6421 0 : if (!entry)
6422 0 : return false; /* no pin => we cannot hold the content lock */
6423 : else
6424 0 : return entry->data.lockmode != BUFFER_LOCK_UNLOCK;
6425 : }
6426 :
6427 : /*
6428 : * Release the content lock for the buffer.
6429 : */
6430 : void
6431 89768361 : UnlockBuffer(Buffer buffer)
6432 : {
6433 : BufferDesc *buf_hdr;
6434 :
6435 : Assert(BufferIsPinned(buffer));
6436 89768361 : if (BufferIsLocal(buffer))
6437 4993273 : return; /* local buffers need no lock */
6438 :
6439 84775088 : buf_hdr = GetBufferDescriptor(buffer - 1); /* shared buffer IDs are 1-based, descriptors 0-based */
6440 84775088 : BufferLockUnlock(buffer, buf_hdr);
6441 : }
6442 :
6443 : /*
6444 : * Acquire the content_lock for the buffer.
6445 : */
6446 : void
6447 87956014 : LockBufferInternal(Buffer buffer, BufferLockMode mode)
6448 : {
6449 : BufferDesc *buf_hdr;
6450 :
6451 : /*
6452 : * We can't wait if we haven't got a PGPROC. This should only occur
6453 : * during bootstrap or shared memory initialization. Put an Assert here
6454 : * to catch unsafe coding practices.
6455 : */
6456 : Assert(!(MyProc == NULL && IsUnderPostmaster));
6457 :
6458 : /* handled in LockBuffer() wrapper */
6459 : Assert(mode != BUFFER_LOCK_UNLOCK);
6460 :
6461 : Assert(BufferIsPinned(buffer));
6462 87956014 : if (BufferIsLocal(buffer))
6463 4916484 : return; /* local buffers need no lock */
6464 :
6465 83039530 : buf_hdr = GetBufferDescriptor(buffer - 1); /* shared buffer IDs are 1-based, descriptors 0-based */
6466 :
6467 : /*
6468 : * Test the most frequent lock modes first. While a switch (mode) would be
6469 : * nice, at least gcc generates considerably worse code for it.
6470 : *
6471 : * Call BufferLockAcquire() with a constant argument for mode, to generate
6472 : * more efficient code for the different lock modes.
6473 : */
6474 83039530 : if (mode == BUFFER_LOCK_SHARE)
6475 58521975 : BufferLockAcquire(buffer, buf_hdr, BUFFER_LOCK_SHARE);
6476 24517555 : else if (mode == BUFFER_LOCK_EXCLUSIVE)
6477 24517555 : BufferLockAcquire(buffer, buf_hdr, BUFFER_LOCK_EXCLUSIVE);
6478 0 : else if (mode == BUFFER_LOCK_SHARE_EXCLUSIVE)
6479 0 : BufferLockAcquire(buffer, buf_hdr, BUFFER_LOCK_SHARE_EXCLUSIVE);
6480 : else
6481 0 : elog(ERROR, "unrecognized buffer lock mode: %d", mode);
6482 : }
6483 :
6484 : /*
6485 : * Acquire the content_lock for the buffer, but only if we don't have to wait.
6486 : *
6487 : * This assumes the caller wants BUFFER_LOCK_EXCLUSIVE mode.
6488 : */
6489 : bool
6490 1523817 : ConditionalLockBuffer(Buffer buffer)
6491 : {
6492 : BufferDesc *buf;
6493 :
6494 : Assert(BufferIsPinned(buffer));
6495 1523817 : if (BufferIsLocal(buffer))
6496 64603 : return true; /* act as though we got it */
6497 :
6498 1459214 : buf = GetBufferDescriptor(buffer - 1); /* shared buffer IDs are 1-based, descriptors 0-based */
6499 :
6500 1459214 : return BufferLockConditional(buffer, buf, BUFFER_LOCK_EXCLUSIVE); /* no-wait acquisition */
6501 : }
6502 :
6503 : /*
6504 : * Verify that this backend is pinning the buffer exactly once.
6505 : *
6506 : * NOTE: Like in BufferIsPinned(), what we check here is that *this* backend
6507 : * holds a pin on the buffer. We do not care whether some other backend does.
6508 : */
6509 : void
6510 2565641 : CheckBufferIsPinnedOnce(Buffer buffer)
6511 : {
6512 2565641 : if (BufferIsLocal(buffer))
6513 : {
6514 791 : if (LocalRefCount[-buffer - 1] != 1) /* local buffers have negative Buffer IDs */
6515 0 : elog(ERROR, "incorrect local pin count: %d",
6516 : LocalRefCount[-buffer - 1]);
6517 : }
6518 : else
6519 : {
6520 2564850 : if (GetPrivateRefCount(buffer) != 1)
6521 0 : elog(ERROR, "incorrect local pin count: %d",
6522 : GetPrivateRefCount(buffer));
6523 : }
6524 2565641 : }
6525 :
6526 : /*
6527 : * LockBufferForCleanup - lock a buffer in preparation for deleting items
6528 : *
6529 : * Items may be deleted from a disk page only when the caller (a) holds an
6530 : * exclusive lock on the buffer and (b) has observed that no other backend
6531 : * holds a pin on the buffer. If there is a pin, then the other backend
6532 : * might have a pointer into the buffer (for example, a heapscan reference
6533 : * to an item --- see README for more details). It's OK if a pin is added
6534 : * after the cleanup starts, however; the newly-arrived backend will be
6535 : * unable to look at the page until we release the exclusive lock.
6536 : *
6537 : * To implement this protocol, a would-be deleter must pin the buffer and
6538 : * then call LockBufferForCleanup(). LockBufferForCleanup() is similar to
6539 : * LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE), except that it loops until
6540 : * it has successfully observed pin count = 1.
6541 : */
6542 : void
6543 22258 : LockBufferForCleanup(Buffer buffer)
6544 : {
6545 : BufferDesc *bufHdr;
6546 22258 : TimestampTz waitStart = 0;
6547 22258 : bool waiting = false;
6548 22258 : bool logged_recovery_conflict = false;
6549 :
6550 : Assert(BufferIsPinned(buffer));
6551 : Assert(PinCountWaitBuf == NULL);
6552 :
6553 22258 : CheckBufferIsPinnedOnce(buffer);
6554 :
6555 : /*
6556 : * We do not yet need to be worried about in-progress AIOs holding a pin,
6557 : * as we, so far, only support doing reads via AIO and this function can
6558 : * only be called once the buffer is valid (i.e. no read can be in
6559 : * flight).
6560 : */
6561 :
6562 : /* Nobody else to wait for */
6563 22258 : if (BufferIsLocal(buffer))
6564 16 : return;
6565 :
6566 22242 : bufHdr = GetBufferDescriptor(buffer - 1);
6567 :
6568 : for (;;)
6569 92 : {
6570 : uint64 buf_state;
6571 22334 : uint64 unset_bits = 0;
6572 :
6573 : /* Try to acquire lock */
6574 22334 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6575 22334 : buf_state = LockBufHdr(bufHdr);
6576 :
6577 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
6578 22334 : if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
6579 : {
6580 : /* Successfully acquired exclusive lock with pincount 1 */
6581 22242 : UnlockBufHdr(bufHdr);
6582 :
6583 : /*
6584 : * Emit the log message if recovery conflict on buffer pin was
6585 : * resolved but the startup process waited longer than
6586 : * deadlock_timeout for it.
6587 : */
6588 22242 : if (logged_recovery_conflict)
6589 2 : LogRecoveryConflict(RECOVERY_CONFLICT_BUFFERPIN,
6590 : waitStart, GetCurrentTimestamp(),
6591 : NULL, false);
6592 :
6593 22242 : if (waiting)
6594 : {
6595 : /* reset ps display to remove the suffix if we added one */
6596 2 : set_ps_display_remove_suffix();
6597 2 : waiting = false;
6598 : }
6599 22242 : return;
6600 : }
6601 : /* Failed, so mark myself as waiting for pincount 1 */
6602 92 : if (buf_state & BM_PIN_COUNT_WAITER)
6603 : {
6604 0 : UnlockBufHdr(bufHdr);
6605 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6606 0 : elog(ERROR, "multiple backends attempting to wait for pincount 1");
6607 : }
6608 92 : bufHdr->wait_backend_pgprocno = MyProcNumber; /* whom UnpinBuffer() should signal */
6609 92 : PinCountWaitBuf = bufHdr;
6610 92 : UnlockBufHdrExt(bufHdr, buf_state,
6611 : BM_PIN_COUNT_WAITER, 0,
6612 : 0);
6613 92 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6614 :
6615 : /* Wait to be signaled by UnpinBuffer() */
6616 92 : if (InHotStandby)
6617 : {
6618 8 : if (!waiting)
6619 : {
6620 : /* adjust the process title to indicate that it's waiting */
6621 2 : set_ps_display_suffix("waiting");
6622 2 : waiting = true;
6623 : }
6624 :
6625 : /*
6626 : * Emit the log message if the startup process is waiting longer
6627 : * than deadlock_timeout for recovery conflict on buffer pin.
6628 : *
6629 : * Skip this if first time through because the startup process has
6630 : * not started waiting yet in this case. So, the wait start
6631 : * timestamp is set after this logic.
6632 : */
6633 8 : if (waitStart != 0 && !logged_recovery_conflict)
6634 : {
6635 3 : TimestampTz now = GetCurrentTimestamp();
6636 :
6637 3 : if (TimestampDifferenceExceeds(waitStart, now,
6638 : DeadlockTimeout))
6639 : {
6640 2 : LogRecoveryConflict(RECOVERY_CONFLICT_BUFFERPIN,
6641 : waitStart, now, NULL, true);
6642 2 : logged_recovery_conflict = true;
6643 : }
6644 : }
6645 :
6646 : /*
6647 : * Set the wait start timestamp if logging is enabled and first
6648 : * time through.
6649 : */
6650 8 : if (log_recovery_conflict_waits && waitStart == 0)
6651 2 : waitStart = GetCurrentTimestamp();
6652 :
6653 : /* Publish the bufid that Startup process waits on */
6654 8 : SetStartupBufferPinWaitBufId(buffer - 1);
6655 : /* Set alarm and then wait to be signaled by UnpinBuffer() */
6656 8 : ResolveRecoveryConflictWithBufferPin();
6657 : /* Reset the published bufid */
6658 8 : SetStartupBufferPinWaitBufId(-1);
6659 : }
6660 : else
6661 84 : ProcWaitForSignal(WAIT_EVENT_BUFFER_CLEANUP);
6662 :
6663 : /*
6664 : * Remove flag marking us as waiter. Normally this will not be set
6665 : * anymore, but ProcWaitForSignal() can return for other signals as
6666 : * well. We take care to only reset the flag if we're the waiter, as
6667 : * theoretically another backend could have started waiting. That's
6668 : * impossible with the current usages due to table level locking, but
6669 : * better be safe.
6670 : */
6671 92 : buf_state = LockBufHdr(bufHdr);
6672 92 : if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
6673 6 : bufHdr->wait_backend_pgprocno == MyProcNumber)
6674 6 : unset_bits |= BM_PIN_COUNT_WAITER;
6675 :
6676 92 : UnlockBufHdrExt(bufHdr, buf_state,
6677 : 0, unset_bits,
6678 : 0);
6679 :
6680 92 : PinCountWaitBuf = NULL;
6681 : /* Loop back and try again */
6682 : }
6683 : }
6684 :
6685 : /*
6686 : * Check called from ProcessRecoveryConflictInterrupts() when Startup process
6687 : * requests cancellation of all pin holders that are blocking it.
6688 : */
6689 : bool
6690 3 : HoldingBufferPinThatDelaysRecovery(void)
6691 : {
6692 3 : int bufid = GetStartupBufferPinWaitBufId();
6693 :
6694 : /*
6695 : * If we get woken slowly then it's possible that the Startup process was
6696 : * already woken by other backends before we got here. Also possible that
6697 : * we get here by multiple interrupts or interrupts at inappropriate
6698 : * times, so make sure we do nothing if the bufid is not set.
6699 : */
6700 3 : if (bufid < 0)
6701 1 : return false;
6702 :
6703 2 : if (GetPrivateRefCount(bufid + 1) > 0) /* bufid is 0-based; Buffer values are 1-based */
6704 2 : return true;
6705 :
6706 0 : return false;
6707 : }
6708 :
6709 : /*
6710 : * ConditionalLockBufferForCleanup - as above, but don't wait to get the lock
6711 : *
6712 : * We won't loop, but just check once to see if the pin count is OK. If
6713 : * not, return false with no lock held.
6714 : */
6715 : bool
6716 576206 : ConditionalLockBufferForCleanup(Buffer buffer)
6717 : {
6718 : BufferDesc *bufHdr;
6719 : uint64 buf_state,
6720 : refcount;
6721 :
6722 : Assert(BufferIsValid(buffer));
6723 :
6724 : /* see AIO related comment in LockBufferForCleanup() */
6725 :
6726 576206 : if (BufferIsLocal(buffer))
6727 : {
6728 803 : refcount = LocalRefCount[-buffer - 1]; /* local buffers have negative Buffer IDs */
6729 : /* There should be exactly one pin */
6730 : Assert(refcount > 0);
6731 803 : if (refcount != 1)
6732 21 : return false;
6733 : /* Nobody else to wait for */
6734 782 : return true;
6735 : }
6736 :
6737 : /* There should be exactly one local pin */
6738 575403 : refcount = GetPrivateRefCount(buffer);
6739 : Assert(refcount);
6740 575403 : if (refcount != 1)
6741 175 : return false;
6742 :
6743 : /* Try to acquire lock */
6744 575228 : if (!ConditionalLockBuffer(buffer))
6745 63 : return false;
6746 :
6747 575165 : bufHdr = GetBufferDescriptor(buffer - 1);
6748 575165 : buf_state = LockBufHdr(bufHdr); /* header spinlock keeps the refcount stable while we check it */
6749 575165 : refcount = BUF_STATE_GET_REFCOUNT(buf_state);
6750 :
6751 : Assert(refcount > 0);
6752 575165 : if (refcount == 1)
6753 : {
6754 : /* Successfully acquired exclusive lock with pincount 1 */
6755 574972 : UnlockBufHdr(bufHdr);
6756 574972 : return true;
6757 : }
6758 :
6759 : /* Failed, so release the lock */
6760 193 : UnlockBufHdr(bufHdr);
6761 193 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6762 193 : return false;
6763 : }
6764 :
6765 : /*
6766 : * IsBufferCleanupOK - as above, but we already have the lock
6767 : *
6768 : * Check whether it's OK to perform cleanup on a buffer we've already
6769 : * locked. If we observe that the pin count is 1, our exclusive lock
6770 : * happens to be a cleanup lock, and we can proceed with anything that
6771 : * would have been allowable had we sought a cleanup lock originally.
6772 : */
6773 : bool
6774 2030 : IsBufferCleanupOK(Buffer buffer)
6775 : {
6776 : BufferDesc *bufHdr;
6777 : uint64 buf_state;
6778 :
6779 : Assert(BufferIsValid(buffer));
6780 :
6781 : /* see AIO related comment in LockBufferForCleanup() */
6782 :
6783 2030 : if (BufferIsLocal(buffer))
6784 : {
6785 : /* There should be exactly one pin */
6786 0 : if (LocalRefCount[-buffer - 1] != 1)
6787 0 : return false;
6788 : /* Nobody else to wait for */
6789 0 : return true;
6790 : }
6791 :
6792 : /* There should be exactly one local pin */
6793 2030 : if (GetPrivateRefCount(buffer) != 1)
6794 0 : return false;
6795 :
6796 2030 : bufHdr = GetBufferDescriptor(buffer - 1);
6797 :
6798 : /* caller must hold exclusive lock on buffer */
6799 : Assert(BufferIsLockedByMeInMode(buffer, BUFFER_LOCK_EXCLUSIVE));
6800 :
6801 2030 : buf_state = LockBufHdr(bufHdr); /* header spinlock keeps the refcount stable while we check it */
6802 :
6803 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
6804 2030 : if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
6805 : {
6806 : /* pincount is OK. */
6807 2030 : UnlockBufHdr(bufHdr);
6808 2030 : return true;
6809 : }
6810 :
6811 0 : UnlockBufHdr(bufHdr);
6812 0 : return false;
6813 : }
6814 :
6815 : /*
6816 : * Helper for BufferBeginSetHintBits() and BufferSetHintBits16().
6817 : *
6818 : * This checks if the current lock mode already suffices to allow hint bits
6819 : * being set and, if not, whether the current lock can be upgraded.
6820 : *
6821 : * Updates *lockstate when returning true.
6822 : */
6823 : static inline bool
6824 6733234 : SharedBufferBeginSetHintBits(Buffer buffer, BufferDesc *buf_hdr, uint64 *lockstate)
6825 : {
6826 : uint64 old_state;
6827 : PrivateRefCountEntry *ref;
6828 : BufferLockMode mode;
6829 :
6830 6733234 : ref = GetPrivateRefCountEntry(buffer, true);
6831 :
6832 6733234 : if (ref == NULL)
6833 0 : elog(ERROR, "buffer is not pinned");
6834 :
6835 6733234 : mode = ref->data.lockmode;
6836 6733234 : if (mode == BUFFER_LOCK_UNLOCK)
6837 0 : elog(ERROR, "buffer is not locked");
6838 :
6839 : /* we're done if we are already holding a sufficient lock level */
6840 6733234 : if (mode == BUFFER_LOCK_EXCLUSIVE || mode == BUFFER_LOCK_SHARE_EXCLUSIVE)
6841 : {
6842 3960621 : *lockstate = pg_atomic_read_u64(&buf_hdr->state);
6843 3960621 : return true;
6844 : }
6845 :
6846 : /*
6847 : * We are only holding a share lock right now, try to upgrade it to
6848 : * SHARE_EXCLUSIVE.
6849 : */
6850 : Assert(mode == BUFFER_LOCK_SHARE);
6851 :
6852 2772613 : old_state = pg_atomic_read_u64(&buf_hdr->state); /* CAS loop: retried on concurrent state changes */
6853 : while (true)
6854 10 : {
6855 : uint64 desired_state;
6856 :
6857 2772623 : desired_state = old_state;
6858 :
6859 : /*
6860 : * Can't upgrade if somebody else holds the lock in exclusive or
6861 : * share-exclusive mode.
6862 : */
6863 2772623 : if (unlikely((old_state & (BM_LOCK_VAL_EXCLUSIVE | BM_LOCK_VAL_SHARE_EXCLUSIVE)) != 0))
6864 : {
6865 45 : return false;
6866 : }
6867 :
6868 : /* currently held lock state */
6869 2772578 : desired_state -= BM_LOCK_VAL_SHARED;
6870 :
6871 : /* new lock level */
6872 2772578 : desired_state += BM_LOCK_VAL_SHARE_EXCLUSIVE;
6873 :
6874 2772578 : if (likely(pg_atomic_compare_exchange_u64(&buf_hdr->state,
6875 : &old_state, desired_state)))
6876 : {
6877 2772568 : ref->data.lockmode = BUFFER_LOCK_SHARE_EXCLUSIVE; /* track the upgraded mode locally */
6878 2772568 : *lockstate = desired_state;
6879 :
6880 2772568 : return true;
6881 : }
6882 : }
6883 : }
6884 :
6885 : /*
6886 : * Try to acquire the right to set hint bits on the buffer.
6887 : *
6888 : * To be allowed to set hint bits, this backend needs to hold either a
6889 : * share-exclusive or an exclusive lock. In case this backend only holds a
6890 : * share lock, this function will try to upgrade the lock to
6891 : * share-exclusive. The caller is only allowed to set hint bits if true is
6892 : * returned.
6893 : *
6894 : * Once BufferBeginSetHintBits() has returned true, hint bits may be set
6895 : * without further calls to BufferBeginSetHintBits(), until the buffer is
6896 : * unlocked.
6897 : *
6898 : *
6899 : * Requiring a share-exclusive lock to set hint bits prevents setting hint
6900 : * bits on buffers that are currently being written out, which could corrupt
6901 : * the checksum on the page. Flushing buffers also requires a share-exclusive
6902 : * lock.
6903 : *
6904 : * Due to a lock >= share-exclusive being required to set hint bits, only one
6905 : * backend can set hint bits at a time. Allowing multiple backends to set hint
6906 : * bits would require more complicated locking: For setting hint bits we'd
6907 : * need to store the count of backends currently setting hint bits, for I/O we
6908 : * would need another lock-level conflicting with the hint-setting
6909 : * lock-level. Given that the share-exclusive lock for setting hint bits is
6910 : * only held for a short time, that backends often would just set the same
6911 : * hint bits and that the cost of occasionally not setting hint bits in hotly
6912 : * accessed pages is fairly low, this seems like an acceptable tradeoff.
6913 : */
6914 : bool
6915 408459 : BufferBeginSetHintBits(Buffer buffer)
6916 : {
6917 : BufferDesc *buf_hdr;
6918 : uint64 lockstate; /* discarded here; only BufferSetHintBits16() needs the resulting state */
6919 :
6920 408459 : if (BufferIsLocal(buffer))
6921 : {
6922 : /*
6923 : * NB: Will need to check if there is a write in progress, once it is
6924 : * possible for writes to be done asynchronously.
6925 : */
6926 5599 : return true;
6927 : }
6928 :
6929 402860 : buf_hdr = GetBufferDescriptor(buffer - 1);
6930 :
6931 402860 : return SharedBufferBeginSetHintBits(buffer, buf_hdr, &lockstate);
6932 : }
6933 :
6934 : /*
6935 : * End a phase of setting hint bits on this buffer, started with
6936 : * BufferBeginSetHintBits().
6937 : *
6938 : * This would strictly speaking not be required (i.e. the caller could do
6939 : * MarkBufferDirtyHint() if so desired), but allows us to perform some sanity
6940 : * checks.
6941 : */
6942 : void
6943 408449 : BufferFinishSetHintBits(Buffer buffer, bool mark_dirty, bool buffer_std)
6944 : {
6945 : if (!BufferIsLocal(buffer))
6946 : Assert(BufferIsLockedByMeInMode(buffer, BUFFER_LOCK_SHARE_EXCLUSIVE) ||
6947 : BufferIsLockedByMeInMode(buffer, BUFFER_LOCK_EXCLUSIVE));
6948 :
6949 408449 : if (mark_dirty) /* dirty-marking is optional, per the caller's request */
6950 221065 : MarkBufferDirtyHint(buffer, buffer_std);
6951 408449 : }
6952 :
6953 : /*
6954 : * Try to set hint bits on a single 16bit value in a buffer.
6955 : *
6956 : * If hint bits are allowed to be set, set *ptr = val, try to mark the buffer
6957 : * dirty and return true. Otherwise false is returned.
6958 : *
6959 : * *ptr needs to be a pointer to memory within the buffer.
6960 : *
6961 : * This is a bit faster than BufferBeginSetHintBits() /
6962 : * BufferFinishSetHintBits() when setting hints once in a buffer, but slower
6963 : * than the former when setting hint bits multiple times in the same buffer.
6964 : */
6965 : bool
6966 6569848 : BufferSetHintBits16(uint16 *ptr, uint16 val, Buffer buffer)
6967 : {
6968 : BufferDesc *buf_hdr;
6969 : uint64 lockstate;
6970 : #ifdef USE_ASSERT_CHECKING
6971 : char *page;
6972 :
6973 : /* verify that the address is on the page */
6974 : page = BufferGetPage(buffer);
6975 : Assert((char *) ptr >= page && (char *) ptr < (page + BLCKSZ));
6976 : #endif
6977 :
6978 6569848 : if (BufferIsLocal(buffer))
6979 : {
6980 239474 : *ptr = val;
6981 :
6982 239474 : MarkLocalBufferDirty(buffer);
6983 :
6984 239474 : return true;
6985 : }
6986 :
6987 6330374 : buf_hdr = GetBufferDescriptor(buffer - 1);
6988 :
6989 6330374 : if (SharedBufferBeginSetHintBits(buffer, buf_hdr, &lockstate))
6990 : {
6991 6330339 : *ptr = val; /* safe: we now hold at least a share-exclusive content lock */
6992 :
6993 6330339 : MarkSharedBufferDirtyHint(buffer, buf_hdr, lockstate, true);
6994 :
6995 6330339 : return true;
6996 : }
6997 :
6998 35 : return false; /* could not upgrade the lock; caller skips setting hint bits */
6999 : }
7000 :
7001 :
7002 : /*
7003 : * Functions for buffer I/O handling
7004 : *
7005 : * Also note that these are used only for shared buffers, not local ones.
7006 : */
7007 :
7008 : /*
7009 : * WaitIO -- Block until the IO_IN_PROGRESS flag on 'buf' is cleared.
7010 : */
7011 : static void
7012 5564 : WaitIO(BufferDesc *buf)
7013 : {
7014 5564 : ConditionVariable *cv = BufferDescriptorGetIOCV(buf);
7015 :
7016 5564 : ConditionVariablePrepareToSleep(cv); /* register on the CV before re-checking the flag, per the CV protocol */
7017 : for (;;)
7018 2377 : {
7019 : uint64 buf_state;
7020 : PgAioWaitRef iow;
7021 :
7022 : /*
7023 : * It may not be necessary to acquire the spinlock to check the flag
7024 : * here, but since this test is essential for correctness, we'd better
7025 : * play it safe.
7026 : */
7027 7941 : buf_state = LockBufHdr(buf);
7028 :
7029 : /*
7030 : * Copy the wait reference while holding the spinlock. This protects
7031 : * against a concurrent TerminateBufferIO() in another backend from
7032 : * clearing the wref while it's being read.
7033 : */
7034 7941 : iow = buf->io_wref;
7035 7941 : UnlockBufHdr(buf);
7036 :
7037 : /* no IO in progress, we don't need to wait */
7038 7941 : if (!(buf_state & BM_IO_IN_PROGRESS))
7039 5564 : break;
7040 :
7041 : /*
7042 : * The buffer has asynchronous IO in progress, wait for it to
7043 : * complete.
7044 : */
7045 2377 : if (pgaio_wref_valid(&iow))
7046 : {
7047 2181 : pgaio_wref_wait(&iow);
7048 :
7049 : /*
7050 : * The AIO subsystem internally uses condition variables and thus
7051 : * might remove this backend from the BufferDesc's CV. While that
7052 : * wouldn't cause a correctness issue (the first CV sleep just
7053 : * immediately returns if not already registered), it seems worth
7054 : * avoiding unnecessary loop iterations, given that we take care
7055 : * to do so at the start of the function.
7056 : */
7057 2181 : ConditionVariablePrepareToSleep(cv);
7058 2181 : continue;
7059 : }
7060 :
7061 : /* wait on BufferDesc->cv, e.g. for concurrent synchronous IO */
7062 196 : ConditionVariableSleep(cv, WAIT_EVENT_BUFFER_IO);
7063 : }
7064 5564 : ConditionVariableCancelSleep();
7065 5564 : }
7066 :
7067 : /*
7068 : * StartBufferIO: begin I/O on this buffer
7069 : * (Assumptions)
7070 : * My process is executing no IO on this buffer
7071 : * The buffer is Pinned
7072 : *
7073 : * In some scenarios multiple backends could attempt the same I/O operation
7074 : * concurrently. If someone else has already started I/O on this buffer then
7075 : * we will wait for completion of the IO using WaitIO().
7076 : *
7077 : * Input operations are only attempted on buffers that are not BM_VALID,
7078 : * and output operations only on buffers that are BM_VALID and BM_DIRTY,
7079 : * so we can always tell if the work is already done.
7080 : *
7081 : * Returns true if we successfully marked the buffer as I/O busy,
7082 : * false if someone else already did the work.
7083 : *
7084 : * If nowait is true, then we don't wait for an I/O to be finished by another
7085 : * backend. In that case, false indicates either that the I/O was already
7086 : * finished, or is still in progress. This is useful for callers that want to
7087 : * find out if they can perform the I/O as part of a larger operation, without
7088 : * waiting for the answer or distinguishing the reasons why not.
7089 : */
bool
StartBufferIO(BufferDesc *buf, bool forInput, bool nowait)
{
    uint64      buf_state;

    /* ensure the resource owner can record the IO before we start it */
    ResourceOwnerEnlarge(CurrentResourceOwner);

    for (;;)
    {
        buf_state = LockBufHdr(buf);

        if (!(buf_state & BM_IO_IN_PROGRESS))
            break;
        UnlockBufHdr(buf);
        if (nowait)
            return false;
        /* another backend owns the IO; block until it finishes, then retry */
        WaitIO(buf);
    }

    /* Once we get here, there is definitely no I/O active on this buffer */

    /* Check if someone else already did the I/O */
    if (forInput ? (buf_state & BM_VALID) : !(buf_state & BM_DIRTY))
    {
        UnlockBufHdr(buf);
        return false;
    }

    /* claim the IO by setting BM_IO_IN_PROGRESS while releasing the lock */
    UnlockBufHdrExt(buf, buf_state,
                    BM_IO_IN_PROGRESS, 0,
                    0);

    ResourceOwnerRememberBufferIO(CurrentResourceOwner,
                                  BufferDescriptorGetBuffer(buf));

    return true;
}
7127 :
7128 : /*
7129 : * TerminateBufferIO: release a buffer we were doing I/O on
7130 : * (Assumptions)
7131 : * My process is executing IO for the buffer
7132 : * BM_IO_IN_PROGRESS bit is set for the buffer
7133 : * The buffer is Pinned
7134 : *
7135 : * If clear_dirty is true and BM_JUST_DIRTIED is not set, we clear the
7136 : * buffer's BM_DIRTY flag. This is appropriate when terminating a
7137 : * successful write. The check on BM_JUST_DIRTIED is necessary to avoid
7138 : * marking the buffer clean if it was re-dirtied while we were writing.
7139 : *
7140 : * set_flag_bits gets ORed into the buffer's flags. It must include
7141 : * BM_IO_ERROR in a failure case. For successful completion it could
7142 : * be 0, or BM_VALID if we just finished reading in the page.
7143 : *
7144 : * If forget_owner is true, we release the buffer I/O from the current
7145 : * resource owner. (forget_owner=false is used when the resource owner itself
7146 : * is being released)
7147 : */
void
TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint64 set_flag_bits,
                  bool forget_owner, bool release_aio)
{
    uint64      buf_state;
    uint64      unset_flag_bits = 0;
    int         refcount_change = 0;

    buf_state = LockBufHdr(buf);

    Assert(buf_state & BM_IO_IN_PROGRESS);
    unset_flag_bits |= BM_IO_IN_PROGRESS;

    /* Clear earlier errors, if this IO failed, it'll be marked again */
    unset_flag_bits |= BM_IO_ERROR;

    /* only mark the buffer clean if nobody re-dirtied it during the write */
    if (clear_dirty && !(buf_state & BM_JUST_DIRTIED))
        unset_flag_bits |= BM_DIRTY | BM_CHECKPOINT_NEEDED;

    if (release_aio)
    {
        /* release ownership by the AIO subsystem */
        Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
        refcount_change = -1;
        pgaio_wref_clear(&buf->io_wref);
    }

    /* apply all flag/refcount changes atomically while releasing the lock */
    buf_state = UnlockBufHdrExt(buf, buf_state,
                                set_flag_bits, unset_flag_bits,
                                refcount_change);

    if (forget_owner)
        ResourceOwnerForgetBufferIO(CurrentResourceOwner,
                                    BufferDescriptorGetBuffer(buf));

    /* wake any backends blocked in WaitIO() on this buffer */
    ConditionVariableBroadcast(BufferDescriptorGetIOCV(buf));

    /*
     * Support LockBufferForCleanup()
     *
     * We may have just released the last pin other than the waiter's. In most
     * cases, this backend holds another pin on the buffer. But, if, for
     * example, this backend is completing an IO issued by another backend, it
     * may be time to wake the waiter.
     */
    if (release_aio && (buf_state & BM_PIN_COUNT_WAITER))
        WakePinCountWaiter(buf);
}
7196 :
7197 : /*
7198 : * AbortBufferIO: Clean up active buffer I/O after an error.
7199 : *
7200 : * All LWLocks & content locks we might have held have been released, but we
7201 : * haven't yet released buffer pins, so the buffer is still pinned.
7202 : *
7203 : * If I/O was in progress, we always set BM_IO_ERROR, even though it's
7204 : * possible the error condition wasn't related to the I/O.
7205 : *
7206 : * Note: this does not remove the buffer I/O from the resource owner.
7207 : * That's correct when we're releasing the whole resource owner, but
7208 : * beware if you use this in other contexts.
7209 : */
static void
AbortBufferIO(Buffer buffer)
{
    BufferDesc *buf_hdr = GetBufferDescriptor(buffer - 1);
    uint64      buf_state;

    buf_state = LockBufHdr(buf_hdr);
    Assert(buf_state & (BM_IO_IN_PROGRESS | BM_TAG_VALID));

    if (!(buf_state & BM_VALID))
    {
        /* aborted read: buffer contents were never marked valid */
        Assert(!(buf_state & BM_DIRTY));
        UnlockBufHdr(buf_hdr);
    }
    else
    {
        /* aborted write: the buffer must still be dirty */
        Assert(buf_state & BM_DIRTY);
        UnlockBufHdr(buf_hdr);

        /* Issue notice if this is not the first failure... */
        if (buf_state & BM_IO_ERROR)
        {
            /* Buffer is pinned, so we can read tag without spinlock */
            ereport(WARNING,
                    (errcode(ERRCODE_IO_ERROR),
                     errmsg("could not write block %u of %s",
                            buf_hdr->tag.blockNum,
                            relpathperm(BufTagGetRelFileLocator(&buf_hdr->tag),
                                        BufTagGetForkNum(&buf_hdr->tag)).str),
                     errdetail("Multiple failures --- write error might be permanent.")));
        }
    }

    /* mark the IO as failed; do not touch the resource owner (see header) */
    TerminateBufferIO(buf_hdr, false, BM_IO_ERROR, false, false);
}
7245 :
7246 : /*
7247 : * Error context callback for errors occurring during shared buffer writes.
7248 : */
7249 : static void
7250 35 : shared_buffer_write_error_callback(void *arg)
7251 : {
7252 35 : BufferDesc *bufHdr = (BufferDesc *) arg;
7253 :
7254 : /* Buffer is pinned, so we can read the tag without locking the spinlock */
7255 35 : if (bufHdr != NULL)
7256 70 : errcontext("writing block %u of relation \"%s\"",
7257 : bufHdr->tag.blockNum,
7258 35 : relpathperm(BufTagGetRelFileLocator(&bufHdr->tag),
7259 : BufTagGetForkNum(&bufHdr->tag)).str);
7260 35 : }
7261 :
7262 : /*
7263 : * Error context callback for errors occurring during local buffer writes.
7264 : */
7265 : static void
7266 0 : local_buffer_write_error_callback(void *arg)
7267 : {
7268 0 : BufferDesc *bufHdr = (BufferDesc *) arg;
7269 :
7270 0 : if (bufHdr != NULL)
7271 0 : errcontext("writing block %u of relation \"%s\"",
7272 : bufHdr->tag.blockNum,
7273 0 : relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
7274 : MyProcNumber,
7275 : BufTagGetForkNum(&bufHdr->tag)).str);
7276 0 : }
7277 :
7278 : /*
7279 : * RelFileLocator qsort/bsearch comparator; see RelFileLocatorEquals.
7280 : */
7281 : static int
7282 9956076 : rlocator_comparator(const void *p1, const void *p2)
7283 : {
7284 9956076 : RelFileLocator n1 = *(const RelFileLocator *) p1;
7285 9956076 : RelFileLocator n2 = *(const RelFileLocator *) p2;
7286 :
7287 9956076 : if (n1.relNumber < n2.relNumber)
7288 9920849 : return -1;
7289 35227 : else if (n1.relNumber > n2.relNumber)
7290 33657 : return 1;
7291 :
7292 1570 : if (n1.dbOid < n2.dbOid)
7293 0 : return -1;
7294 1570 : else if (n1.dbOid > n2.dbOid)
7295 0 : return 1;
7296 :
7297 1570 : if (n1.spcOid < n2.spcOid)
7298 0 : return -1;
7299 1570 : else if (n1.spcOid > n2.spcOid)
7300 0 : return 1;
7301 : else
7302 1570 : return 0;
7303 : }
7304 :
7305 : /*
7306 : * Lock buffer header - set BM_LOCKED in buffer state.
7307 : */
uint64
LockBufHdr(BufferDesc *desc)
{
    uint64      old_buf_state;

    /* local buffers have no header lock; callers must not pass them here */
    Assert(!BufferIsLocal(BufferDescriptorGetBuffer(desc)));

    while (true)
    {
        /*
         * Always try once to acquire the lock directly, without setting up
         * the spin-delay infrastructure. The work necessary for that shows up
         * in profiles and is rarely necessary.
         */
        old_buf_state = pg_atomic_fetch_or_u64(&desc->state, BM_LOCKED);
        if (likely(!(old_buf_state & BM_LOCKED)))
            break;              /* got lock */

        /* and then spin without atomic operations until lock is released */
        {
            SpinDelayStatus delayStatus;

            init_local_spin_delay(&delayStatus);

            while (old_buf_state & BM_LOCKED)
            {
                perform_spin_delay(&delayStatus);
                old_buf_state = pg_atomic_read_u64(&desc->state);
            }
            finish_spin_delay(&delayStatus);
        }

        /*
         * Retry. The lock might obviously already be re-acquired by the time
         * we're attempting to get it again.
         */
    }

    /* return the state as it is now, with our BM_LOCKED bit included */
    return old_buf_state | BM_LOCKED;
}
7348 :
7349 : /*
7350 : * Wait until the BM_LOCKED flag isn't set anymore and return the buffer's
7351 : * state at that point.
7352 : *
7353 : * Obviously the buffer could be locked by the time the value is returned, so
7354 : * this is primarily useful in CAS style loops.
7355 : */
pg_noinline uint64
WaitBufHdrUnlocked(BufferDesc *buf)
{
    SpinDelayStatus delayStatus;
    uint64      buf_state;

    init_local_spin_delay(&delayStatus);

    buf_state = pg_atomic_read_u64(&buf->state);

    /* spin with plain reads (no atomics) until the lock bit clears */
    while (buf_state & BM_LOCKED)
    {
        perform_spin_delay(&delayStatus);
        buf_state = pg_atomic_read_u64(&buf->state);
    }

    finish_spin_delay(&delayStatus);

    /* NB: may already be stale by the time the caller inspects it */
    return buf_state;
}
7376 :
7377 : /*
7378 : * BufferTag comparator.
7379 : */
7380 : static inline int
7381 0 : buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
7382 : {
7383 : int ret;
7384 : RelFileLocator rlocatora;
7385 : RelFileLocator rlocatorb;
7386 :
7387 0 : rlocatora = BufTagGetRelFileLocator(ba);
7388 0 : rlocatorb = BufTagGetRelFileLocator(bb);
7389 :
7390 0 : ret = rlocator_comparator(&rlocatora, &rlocatorb);
7391 :
7392 0 : if (ret != 0)
7393 0 : return ret;
7394 :
7395 0 : if (BufTagGetForkNum(ba) < BufTagGetForkNum(bb))
7396 0 : return -1;
7397 0 : if (BufTagGetForkNum(ba) > BufTagGetForkNum(bb))
7398 0 : return 1;
7399 :
7400 0 : if (ba->blockNum < bb->blockNum)
7401 0 : return -1;
7402 0 : if (ba->blockNum > bb->blockNum)
7403 0 : return 1;
7404 :
7405 0 : return 0;
7406 : }
7407 :
7408 : /*
7409 : * Comparator determining the writeout order in a checkpoint.
7410 : *
7411 : * It is important that tablespaces are compared first, the logic balancing
7412 : * writes between tablespaces relies on it.
7413 : */
7414 : static inline int
7415 3084301 : ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b)
7416 : {
7417 : /* compare tablespace */
7418 3084301 : if (a->tsId < b->tsId)
7419 7266 : return -1;
7420 3077035 : else if (a->tsId > b->tsId)
7421 25099 : return 1;
7422 : /* compare relation */
7423 3051936 : if (a->relNumber < b->relNumber)
7424 859878 : return -1;
7425 2192058 : else if (a->relNumber > b->relNumber)
7426 843903 : return 1;
7427 : /* compare fork */
7428 1348155 : else if (a->forkNum < b->forkNum)
7429 56443 : return -1;
7430 1291712 : else if (a->forkNum > b->forkNum)
7431 63267 : return 1;
7432 : /* compare block number */
7433 1228445 : else if (a->blockNum < b->blockNum)
7434 605377 : return -1;
7435 623068 : else if (a->blockNum > b->blockNum)
7436 580146 : return 1;
7437 : /* equal page IDs are unlikely, but not impossible */
7438 42922 : return 0;
7439 : }
7440 :
7441 : /*
7442 : * Comparator for a Min-Heap over the per-tablespace checkpoint completion
7443 : * progress.
7444 : */
7445 : static int
7446 252801 : ts_ckpt_progress_comparator(Datum a, Datum b, void *arg)
7447 : {
7448 252801 : CkptTsStatus *sa = (CkptTsStatus *) DatumGetPointer(a);
7449 252801 : CkptTsStatus *sb = (CkptTsStatus *) DatumGetPointer(b);
7450 :
7451 : /* we want a min-heap, so return 1 for the a < b */
7452 252801 : if (sa->progress < sb->progress)
7453 227513 : return 1;
7454 25288 : else if (sa->progress == sb->progress)
7455 781 : return 0;
7456 : else
7457 24507 : return -1;
7458 : }
7459 :
7460 : /*
7461 : * Initialize a writeback context, discarding potential previous state.
7462 : *
7463 : * *max_pending is a pointer instead of an immediate value, so the coalesce
7464 : * limits can easily changed by the GUC mechanism, and so calling code does
7465 : * not have to check the current configuration. A value of 0 means that no
7466 : * writeback control will be performed.
7467 : */
7468 : void
7469 2849 : WritebackContextInit(WritebackContext *context, int *max_pending)
7470 : {
7471 : Assert(*max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
7472 :
7473 2849 : context->max_pending = max_pending;
7474 2849 : context->nr_pending = 0;
7475 2849 : }
7476 :
7477 : /*
7478 : * Add buffer to list of pending writeback requests.
7479 : */
7480 : void
7481 575767 : ScheduleBufferTagForWriteback(WritebackContext *wb_context, IOContext io_context,
7482 : BufferTag *tag)
7483 : {
7484 : PendingWriteback *pending;
7485 :
7486 : /*
7487 : * As pg_flush_data() doesn't do anything with fsync disabled, there's no
7488 : * point in tracking in that case.
7489 : */
7490 575767 : if (io_direct_flags & IO_DIRECT_DATA ||
7491 575238 : !enableFsync)
7492 575767 : return;
7493 :
7494 : /*
7495 : * Add buffer to the pending writeback array, unless writeback control is
7496 : * disabled.
7497 : */
7498 0 : if (*wb_context->max_pending > 0)
7499 : {
7500 : Assert(*wb_context->max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
7501 :
7502 0 : pending = &wb_context->pending_writebacks[wb_context->nr_pending++];
7503 :
7504 0 : pending->tag = *tag;
7505 : }
7506 :
7507 : /*
7508 : * Perform pending flushes if the writeback limit is exceeded. This
7509 : * includes the case where previously an item has been added, but control
7510 : * is now disabled.
7511 : */
7512 0 : if (wb_context->nr_pending >= *wb_context->max_pending)
7513 0 : IssuePendingWritebacks(wb_context, io_context);
7514 : }
7515 :
/*
 * Instantiate sort_pending_writebacks(), a static sort routine over
 * PendingWriteback entries ordered by buffertag_comparator(), via the
 * generic sort template.
 */
#define ST_SORT sort_pending_writebacks
#define ST_ELEMENT_TYPE PendingWriteback
#define ST_COMPARE(a, b) buffertag_comparator(&a->tag, &b->tag)
#define ST_SCOPE static
#define ST_DEFINE
#include "lib/sort_template.h"
7522 :
7523 : /*
7524 : * Issue all pending writeback requests, previously scheduled with
7525 : * ScheduleBufferTagForWriteback, to the OS.
7526 : *
7527 : * Because this is only used to improve the OSs IO scheduling we try to never
7528 : * error out - it's just a hint.
7529 : */
void
IssuePendingWritebacks(WritebackContext *wb_context, IOContext io_context)
{
    instr_time  io_start;
    int         i;

    if (wb_context->nr_pending == 0)
        return;

    /*
     * Executing the writes in-order can make them a lot faster, and allows to
     * merge writeback requests to consecutive blocks into larger writebacks.
     */
    sort_pending_writebacks(wb_context->pending_writebacks,
                            wb_context->nr_pending);

    io_start = pgstat_prepare_io_time(track_io_timing);

    /*
     * Coalesce neighbouring writes, but nothing else. For that we iterate
     * through the, now sorted, array of pending flushes, and look forward to
     * find all neighbouring (or identical) writes.
     */
    for (i = 0; i < wb_context->nr_pending; i++)
    {
        PendingWriteback *cur;
        PendingWriteback *next;
        SMgrRelation reln;
        int         ahead;
        BufferTag   tag;
        RelFileLocator currlocator;
        Size        nblocks = 1;

        cur = &wb_context->pending_writebacks[i];
        tag = cur->tag;
        currlocator = BufTagGetRelFileLocator(&tag);

        /*
         * Peek ahead, into following writeback requests, to see if they can
         * be combined with the current one.
         */
        for (ahead = 0; i + ahead + 1 < wb_context->nr_pending; ahead++)
        {

            next = &wb_context->pending_writebacks[i + ahead + 1];

            /* different file, stop */
            if (!RelFileLocatorEquals(currlocator,
                                      BufTagGetRelFileLocator(&next->tag)) ||
                BufTagGetForkNum(&cur->tag) != BufTagGetForkNum(&next->tag))
                break;

            /* ok, block queued twice, skip */
            if (cur->tag.blockNum == next->tag.blockNum)
                continue;

            /* only merge consecutive writes */
            if (cur->tag.blockNum + 1 != next->tag.blockNum)
                break;

            nblocks++;
            cur = next;
        }

        /* skip over the entries merged into this request */
        i += ahead;

        /* and finally tell the kernel to write the data to storage */
        reln = smgropen(currlocator, INVALID_PROC_NUMBER);
        smgrwriteback(reln, BufTagGetForkNum(&tag), tag.blockNum, nblocks);
    }

    /*
     * Assume that writeback requests are only issued for buffers containing
     * blocks of permanent relations.
     */
    pgstat_count_io_op_time(IOOBJECT_RELATION, io_context,
                            IOOP_WRITEBACK, io_start, wb_context->nr_pending, 0);

    /* all requests have been handed to the kernel; reset the queue */
    wb_context->nr_pending = 0;
}
7610 :
7611 : /* ResourceOwner callbacks */
7612 :
7613 : static void
7614 15 : ResOwnerReleaseBufferIO(Datum res)
7615 : {
7616 15 : Buffer buffer = DatumGetInt32(res);
7617 :
7618 15 : AbortBufferIO(buffer);
7619 15 : }
7620 :
7621 : static char *
7622 0 : ResOwnerPrintBufferIO(Datum res)
7623 : {
7624 0 : Buffer buffer = DatumGetInt32(res);
7625 :
7626 0 : return psprintf("lost track of buffer IO on buffer %d", buffer);
7627 : }
7628 :
7629 : /*
7630 : * Release buffer as part of resource owner cleanup. This will only be called
7631 : * if the buffer is pinned. If this backend held the content lock at the time
7632 : * of the error we also need to release that (note that it is not possible to
7633 : * hold a content lock without a pin).
7634 : */
static void
ResOwnerReleaseBuffer(Datum res)
{
    Buffer      buffer = DatumGetInt32(res);

    /* Like ReleaseBuffer, but don't call ResourceOwnerForgetBuffer */
    if (!BufferIsValid(buffer))
        elog(ERROR, "bad buffer ID: %d", buffer);

    if (BufferIsLocal(buffer))
        UnpinLocalBufferNoOwner(buffer);
    else
    {
        PrivateRefCountEntry *ref;

        ref = GetPrivateRefCountEntry(buffer, false);

        /* not having a private refcount would imply resowner corruption */
        Assert(ref != NULL);

        /*
         * If the buffer was locked at the time of the resowner release,
         * release the lock now. This should only happen after errors.
         */
        if (ref->data.lockmode != BUFFER_LOCK_UNLOCK)
        {
            BufferDesc *buf = GetBufferDescriptor(buffer - 1);

            HOLD_INTERRUPTS();  /* match the upcoming RESUME_INTERRUPTS */
            BufferLockUnlock(buffer, buf);
        }

        /* finally drop the pin itself */
        UnpinBufferNoOwner(GetBufferDescriptor(buffer - 1));
    }
}
7670 :
7671 : static char *
7672 0 : ResOwnerPrintBuffer(Datum res)
7673 : {
7674 0 : return DebugPrintBufferRefcount(DatumGetInt32(res));
7675 : }
7676 :
7677 : /*
7678 : * Helper function to evict unpinned buffer whose buffer header lock is
7679 : * already acquired.
7680 : */
static bool
EvictUnpinnedBufferInternal(BufferDesc *desc, bool *buffer_flushed)
{
    uint64      buf_state;
    bool        result;

    *buffer_flushed = false;

    buf_state = pg_atomic_read_u64(&(desc->state));
    Assert(buf_state & BM_LOCKED);

    /* nothing to evict if the buffer holds no valid page */
    if ((buf_state & BM_VALID) == 0)
    {
        UnlockBufHdr(desc);
        return false;
    }

    /* Check that it's not pinned already. */
    if (BUF_STATE_GET_REFCOUNT(buf_state) > 0)
    {
        UnlockBufHdr(desc);
        return false;
    }

    PinBuffer_Locked(desc);     /* releases spinlock */

    /* If it was dirty, try to clean it once. */
    if (buf_state & BM_DIRTY)
    {
        FlushUnlockedBuffer(desc, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
        *buffer_flushed = true;
    }

    /* This will return false if it becomes dirty or someone else pins it. */
    result = InvalidateVictimBuffer(desc);

    UnpinBuffer(desc);

    return result;
}
7721 :
7722 : /*
7723 : * Try to evict the current block in a shared buffer.
7724 : *
7725 : * This function is intended for testing/development use only!
7726 : *
7727 : * To succeed, the buffer must not be pinned on entry, so if the caller had a
7728 : * particular block in mind, it might already have been replaced by some other
7729 : * block by the time this function runs. It's also unpinned on return, so the
7730 : * buffer might be occupied again by the time control is returned, potentially
7731 : * even by the same block. This inherent raciness without other interlocking
7732 : * makes the function unsuitable for non-testing usage.
7733 : *
7734 : * *buffer_flushed is set to true if the buffer was dirty and has been
7735 : * flushed, false otherwise. However, *buffer_flushed=true does not
7736 : * necessarily mean that we flushed the buffer, it could have been flushed by
7737 : * someone else.
7738 : *
7739 : * Returns true if the buffer was valid and it has now been made invalid.
7740 : * Returns false if it wasn't valid, if it couldn't be evicted due to a pin,
7741 : * or if the buffer becomes dirty again while we're trying to write it out.
7742 : */
7743 : bool
7744 140 : EvictUnpinnedBuffer(Buffer buf, bool *buffer_flushed)
7745 : {
7746 : BufferDesc *desc;
7747 :
7748 : Assert(BufferIsValid(buf) && !BufferIsLocal(buf));
7749 :
7750 : /* Make sure we can pin the buffer. */
7751 140 : ResourceOwnerEnlarge(CurrentResourceOwner);
7752 140 : ReservePrivateRefCountEntry();
7753 :
7754 140 : desc = GetBufferDescriptor(buf - 1);
7755 140 : LockBufHdr(desc);
7756 :
7757 140 : return EvictUnpinnedBufferInternal(desc, buffer_flushed);
7758 : }
7759 :
7760 : /*
7761 : * Try to evict all the shared buffers.
7762 : *
7763 : * This function is intended for testing/development use only! See
7764 : * EvictUnpinnedBuffer().
7765 : *
7766 : * The buffers_* parameters are mandatory and indicate the total count of
7767 : * buffers that:
7768 : * - buffers_evicted - were evicted
7769 : * - buffers_flushed - were flushed
7770 : * - buffers_skipped - could not be evicted
7771 : */
void
EvictAllUnpinnedBuffers(int32 *buffers_evicted, int32 *buffers_flushed,
                        int32 *buffers_skipped)
{
    *buffers_evicted = 0;
    *buffers_skipped = 0;
    *buffers_flushed = 0;

    for (int buf = 1; buf <= NBuffers; buf++)
    {
        BufferDesc *desc = GetBufferDescriptor(buf - 1);
        uint64      buf_state;
        bool        buffer_flushed;

        CHECK_FOR_INTERRUPTS();

        /* cheap unlocked precheck; invalid buffers can be skipped outright */
        buf_state = pg_atomic_read_u64(&desc->state);
        if (!(buf_state & BM_VALID))
            continue;

        /* make sure the pin taken by the helper cannot fail */
        ResourceOwnerEnlarge(CurrentResourceOwner);
        ReservePrivateRefCountEntry();

        /* helper expects the header lock to be held on entry */
        LockBufHdr(desc);

        if (EvictUnpinnedBufferInternal(desc, &buffer_flushed))
            (*buffers_evicted)++;
        else
            (*buffers_skipped)++;

        if (buffer_flushed)
            (*buffers_flushed)++;
    }
}
7806 :
7807 : /*
7808 : * Try to evict all the shared buffers containing provided relation's pages.
7809 : *
7810 : * This function is intended for testing/development use only! See
7811 : * EvictUnpinnedBuffer().
7812 : *
7813 : * The caller must hold at least AccessShareLock on the relation to prevent
7814 : * the relation from being dropped.
7815 : *
7816 : * The buffers_* parameters are mandatory and indicate the total count of
7817 : * buffers that:
7818 : * - buffers_evicted - were evicted
7819 : * - buffers_flushed - were flushed
7820 : * - buffers_skipped - could not be evicted
7821 : */
void
EvictRelUnpinnedBuffers(Relation rel, int32 *buffers_evicted,
                        int32 *buffers_flushed, int32 *buffers_skipped)
{
    /* relations using local buffers are handled elsewhere */
    Assert(!RelationUsesLocalBuffers(rel));

    *buffers_skipped = 0;
    *buffers_evicted = 0;
    *buffers_flushed = 0;

    for (int buf = 1; buf <= NBuffers; buf++)
    {
        BufferDesc *desc = GetBufferDescriptor(buf - 1);
        uint64      buf_state = pg_atomic_read_u64(&(desc->state));
        bool        buffer_flushed;

        CHECK_FOR_INTERRUPTS();

        /* An unlocked precheck should be safe and saves some cycles. */
        if ((buf_state & BM_VALID) == 0 ||
            !BufTagMatchesRelFileLocator(&desc->tag, &rel->rd_locator))
            continue;

        /* Make sure we can pin the buffer. */
        ResourceOwnerEnlarge(CurrentResourceOwner);
        ReservePrivateRefCountEntry();

        buf_state = LockBufHdr(desc);

        /* recheck, could have changed without the lock */
        if ((buf_state & BM_VALID) == 0 ||
            !BufTagMatchesRelFileLocator(&desc->tag, &rel->rd_locator))
        {
            UnlockBufHdr(desc);
            continue;
        }

        if (EvictUnpinnedBufferInternal(desc, &buffer_flushed))
            (*buffers_evicted)++;
        else
            (*buffers_skipped)++;

        if (buffer_flushed)
            (*buffers_flushed)++;
    }
}
7868 :
7869 : /*
7870 : * Helper function to mark unpinned buffer dirty whose buffer header lock is
7871 : * already acquired.
7872 : */
static bool
MarkDirtyUnpinnedBufferInternal(Buffer buf, BufferDesc *desc,
                                bool *buffer_already_dirty)
{
    uint64      buf_state;
    bool        result = false;

    *buffer_already_dirty = false;

    buf_state = pg_atomic_read_u64(&(desc->state));
    Assert(buf_state & BM_LOCKED);

    /* cannot dirty a buffer that holds no valid page */
    if ((buf_state & BM_VALID) == 0)
    {
        UnlockBufHdr(desc);
        return false;
    }

    /* Check that it's not pinned already. */
    if (BUF_STATE_GET_REFCOUNT(buf_state) > 0)
    {
        UnlockBufHdr(desc);
        return false;
    }

    /* Pin the buffer and then release the buffer spinlock */
    PinBuffer_Locked(desc);

    /* If it was not already dirty, mark it as dirty. */
    if (!(buf_state & BM_DIRTY))
    {
        /* MarkBufferDirty() requires the content lock to be held */
        BufferLockAcquire(buf, desc, BUFFER_LOCK_EXCLUSIVE);
        MarkBufferDirty(buf);
        result = true;
        BufferLockUnlock(buf, desc);
    }
    else
        *buffer_already_dirty = true;

    UnpinBuffer(desc);

    return result;
}
7916 :
7917 : /*
7918 : * Try to mark the provided shared buffer as dirty.
7919 : *
7920 : * This function is intended for testing/development use only!
7921 : *
7922 : * Same as EvictUnpinnedBuffer() but with MarkBufferDirty() call inside.
7923 : *
7924 : * The buffer_already_dirty parameter is mandatory and indicate if the buffer
7925 : * could not be dirtied because it is already dirty.
7926 : *
7927 : * Returns true if the buffer has successfully been marked as dirty.
7928 : */
7929 : bool
7930 1 : MarkDirtyUnpinnedBuffer(Buffer buf, bool *buffer_already_dirty)
7931 : {
7932 : BufferDesc *desc;
7933 1 : bool buffer_dirtied = false;
7934 :
7935 : Assert(!BufferIsLocal(buf));
7936 :
7937 : /* Make sure we can pin the buffer. */
7938 1 : ResourceOwnerEnlarge(CurrentResourceOwner);
7939 1 : ReservePrivateRefCountEntry();
7940 :
7941 1 : desc = GetBufferDescriptor(buf - 1);
7942 1 : LockBufHdr(desc);
7943 :
7944 1 : buffer_dirtied = MarkDirtyUnpinnedBufferInternal(buf, desc, buffer_already_dirty);
7945 : /* Both can not be true at the same time */
7946 : Assert(!(buffer_dirtied && *buffer_already_dirty));
7947 :
7948 1 : return buffer_dirtied;
7949 : }
7950 :
7951 : /*
7952 : * Try to mark all the shared buffers containing provided relation's pages as
7953 : * dirty.
7954 : *
7955 : * This function is intended for testing/development use only! See
7956 : * MarkDirtyUnpinnedBuffer().
7957 : *
7958 : * The buffers_* parameters are mandatory and indicate the total count of
7959 : * buffers that:
7960 : * - buffers_dirtied - were dirtied
7961 : * - buffers_already_dirty - were already dirty
7962 : * - buffers_skipped - could not be dirtied because of a reason different
7963 : * than a buffer being already dirty.
7964 : */
void
MarkDirtyRelUnpinnedBuffers(Relation rel,
                            int32 *buffers_dirtied,
                            int32 *buffers_already_dirty,
                            int32 *buffers_skipped)
{
    /* relations using local buffers are handled elsewhere */
    Assert(!RelationUsesLocalBuffers(rel));

    *buffers_dirtied = 0;
    *buffers_already_dirty = 0;
    *buffers_skipped = 0;

    for (int buf = 1; buf <= NBuffers; buf++)
    {
        BufferDesc *desc = GetBufferDescriptor(buf - 1);
        uint64      buf_state = pg_atomic_read_u64(&(desc->state));
        bool        buffer_already_dirty;

        CHECK_FOR_INTERRUPTS();

        /* An unlocked precheck should be safe and saves some cycles. */
        if ((buf_state & BM_VALID) == 0 ||
            !BufTagMatchesRelFileLocator(&desc->tag, &rel->rd_locator))
            continue;

        /* Make sure we can pin the buffer. */
        ResourceOwnerEnlarge(CurrentResourceOwner);
        ReservePrivateRefCountEntry();

        buf_state = LockBufHdr(desc);

        /* recheck, could have changed without the lock */
        if ((buf_state & BM_VALID) == 0 ||
            !BufTagMatchesRelFileLocator(&desc->tag, &rel->rd_locator))
        {
            UnlockBufHdr(desc);
            continue;
        }

        if (MarkDirtyUnpinnedBufferInternal(buf, desc, &buffer_already_dirty))
            (*buffers_dirtied)++;
        else if (buffer_already_dirty)
            (*buffers_already_dirty)++;
        else
            (*buffers_skipped)++;
    }
}
8012 :
8013 : /*
8014 : * Try to mark all the shared buffers as dirty.
8015 : *
8016 : * This function is intended for testing/development use only! See
8017 : * MarkDirtyUnpinnedBuffer().
8018 : *
8019 : * See MarkDirtyRelUnpinnedBuffers() above for details about the buffers_*
8020 : * parameters.
8021 : */
8022 : void
8023 1 : MarkDirtyAllUnpinnedBuffers(int32 *buffers_dirtied,
8024 : int32 *buffers_already_dirty,
8025 : int32 *buffers_skipped)
8026 : {
8027 1 : *buffers_dirtied = 0;
8028 1 : *buffers_already_dirty = 0;
8029 1 : *buffers_skipped = 0;
8030 :
8031 16385 : for (int buf = 1; buf <= NBuffers; buf++)
8032 : {
8033 16384 : BufferDesc *desc = GetBufferDescriptor(buf - 1);
8034 : uint64 buf_state;
8035 : bool buffer_already_dirty;
8036 :
8037 16384 : CHECK_FOR_INTERRUPTS();
8038 :
8039 16384 : buf_state = pg_atomic_read_u64(&desc->state);
8040 16384 : if (!(buf_state & BM_VALID))
8041 16349 : continue;
8042 :
8043 35 : ResourceOwnerEnlarge(CurrentResourceOwner);
8044 35 : ReservePrivateRefCountEntry();
8045 :
8046 35 : LockBufHdr(desc);
8047 :
8048 35 : if (MarkDirtyUnpinnedBufferInternal(buf, desc, &buffer_already_dirty))
8049 16 : (*buffers_dirtied)++;
8050 19 : else if (buffer_already_dirty)
8051 19 : (*buffers_already_dirty)++;
8052 : else
8053 0 : (*buffers_skipped)++;
8054 : }
8055 1 : }
8056 :
8057 : /*
8058 : * Generic implementation of the AIO handle staging callback for readv/writev
8059 : * on local/shared buffers.
8060 : *
8061 : * Each readv/writev can target multiple buffers. The buffers have already
8062 : * been registered with the IO handle.
8063 : *
8064 : * To make the IO ready for execution ("staging"), we need to ensure that the
8065 : * targeted buffers are in an appropriate state while the IO is ongoing. For
8066 : * that the AIO subsystem needs to have its own buffer pin, otherwise an error
8067 : * in this backend could lead to this backend's buffer pin being released as
8068 : * part of error handling, which in turn could lead to the buffer being
8069 : * replaced while IO is ongoing.
8070 : */
8071 : static pg_attribute_always_inline void
8072 1317785 : buffer_stage_common(PgAioHandle *ioh, bool is_write, bool is_temp)
8073 : {
8074 : uint64 *io_data;
8075 : uint8 handle_data_len;
8076 : PgAioWaitRef io_ref;
8077 1317785 : BufferTag first PG_USED_FOR_ASSERTS_ONLY = {0};
8078 :
8079 1317785 : io_data = pgaio_io_get_handle_data(ioh, &handle_data_len);
8080 :
8081 1317785 : pgaio_io_get_wref(ioh, &io_ref);
8082 :
8083 : /* iterate over all buffers affected by the vectored readv/writev */
8084 2808915 : for (int i = 0; i < handle_data_len; i++)
8085 : {
8086 1491130 : Buffer buffer = (Buffer) io_data[i];
8087 1491130 : BufferDesc *buf_hdr = is_temp ?
8088 8409 : GetLocalBufferDescriptor(-buffer - 1)
8089 1491130 : : GetBufferDescriptor(buffer - 1);
8090 : uint64 buf_state;
8091 :
8092 : /*
8093 : * Check that all the buffers are actually ones that could conceivably
8094 : * be done in one IO, i.e. are sequential. This is the last
8095 : * buffer-aware code before IO is actually executed and confusion
8096 : * about which buffers are targeted by IO can be hard to debug, making
8097 : * it worth doing extra-paranoid checks.
8098 : */
8099 1491130 : if (i == 0)
8100 1317785 : first = buf_hdr->tag;
8101 : else
8102 : {
8103 : Assert(buf_hdr->tag.relNumber == first.relNumber);
8104 : Assert(buf_hdr->tag.blockNum == first.blockNum + i);
8105 : }
8106 :
8107 1491130 : if (is_temp)
8108 8409 : buf_state = pg_atomic_read_u64(&buf_hdr->state);
8109 : else
8110 1482721 : buf_state = LockBufHdr(buf_hdr);
8111 :
8112 : /* verify the buffer is in the expected state */
8113 : Assert(buf_state & BM_TAG_VALID);
8114 : if (is_write)
8115 : {
8116 : Assert(buf_state & BM_VALID);
8117 : Assert(buf_state & BM_DIRTY);
8118 : }
8119 : else
8120 : {
8121 : Assert(!(buf_state & BM_VALID));
8122 : Assert(!(buf_state & BM_DIRTY));
8123 : }
8124 :
8125 : /* temp buffers don't use BM_IO_IN_PROGRESS */
8126 1491130 : if (!is_temp)
8127 : Assert(buf_state & BM_IO_IN_PROGRESS);
8128 :
8129 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) >= 1);
8130 :
8131 : /*
8132 : * Reflect that the buffer is now owned by the AIO subsystem.
8133 : *
8134 : * For local buffers: This can't be done just via LocalRefCount, as
8135 : * one might initially think, as this backend could error out while
8136 : * AIO is still in progress, releasing all the pins by the backend
8137 : * itself.
8138 : *
8139 : * This pin is released again in TerminateBufferIO().
8140 : */
8141 1491130 : buf_hdr->io_wref = io_ref;
8142 :
8143 1491130 : if (is_temp)
8144 : {
8145 8409 : buf_state += BUF_REFCOUNT_ONE;
8146 8409 : pg_atomic_unlocked_write_u64(&buf_hdr->state, buf_state);
8147 : }
8148 : else
8149 1482721 : UnlockBufHdrExt(buf_hdr, buf_state, 0, 0, 1);
8150 :
8151 : /*
8152 : * Ensure the content lock that prevents buffer modifications while
8153 : * the buffer is being written out is not released early due to an
8154 : * error.
8155 : */
8156 1491130 : if (is_write && !is_temp)
8157 : {
8158 : Assert(BufferLockHeldByMe(buf_hdr));
8159 :
8160 : /*
8161 : * Lock is now owned by AIO subsystem.
8162 : */
8163 0 : BufferLockDisown(buffer, buf_hdr);
8164 : }
8165 :
8166 : /*
8167 : * Stop tracking this buffer via the resowner - the AIO system now
8168 : * keeps track.
8169 : */
8170 1491130 : if (!is_temp)
8171 1482721 : ResourceOwnerForgetBufferIO(CurrentResourceOwner, buffer);
8172 : }
8173 1317785 : }
8174 :
8175 : /*
8176 : * Decode readv errors as encoded by buffer_readv_encode_error().
8177 : */
8178 : static inline void
8179 349 : buffer_readv_decode_error(PgAioResult result,
8180 : bool *zeroed_any,
8181 : bool *ignored_any,
8182 : uint8 *zeroed_or_error_count,
8183 : uint8 *checkfail_count,
8184 : uint8 *first_off)
8185 : {
8186 349 : uint32 rem_error = result.error_data;
8187 :
8188 : /* see static asserts in buffer_readv_encode_error */
8189 : #define READV_COUNT_BITS 7
8190 : #define READV_COUNT_MASK ((1 << READV_COUNT_BITS) - 1)
8191 :
8192 349 : *zeroed_any = rem_error & 1;
8193 349 : rem_error >>= 1;
8194 :
8195 349 : *ignored_any = rem_error & 1;
8196 349 : rem_error >>= 1;
8197 :
8198 349 : *zeroed_or_error_count = rem_error & READV_COUNT_MASK;
8199 349 : rem_error >>= READV_COUNT_BITS;
8200 :
8201 349 : *checkfail_count = rem_error & READV_COUNT_MASK;
8202 349 : rem_error >>= READV_COUNT_BITS;
8203 :
8204 349 : *first_off = rem_error & READV_COUNT_MASK;
8205 349 : rem_error >>= READV_COUNT_BITS;
8206 349 : }
8207 :
8208 : /*
8209 : * Helper to encode errors for buffer_readv_complete()
8210 : *
8211 : * Errors are encoded as follows:
8212 : * - bit 0 indicates whether any page was zeroed (1) or not (0)
8213 : * - bit 1 indicates whether any checksum failure was ignored (1) or not (0)
8214 : * - next READV_COUNT_BITS bits indicate the number of errored or zeroed pages
8215 : * - next READV_COUNT_BITS bits indicate the number of checksum failures
8216 : * - next READV_COUNT_BITS bits indicate the first offset of the first page
8217 : * that was errored or zeroed or, if no errors/zeroes, the first ignored
8218 : * checksum
8219 : */
8220 : static inline void
8221 192 : buffer_readv_encode_error(PgAioResult *result,
8222 : bool is_temp,
8223 : bool zeroed_any,
8224 : bool ignored_any,
8225 : uint8 error_count,
8226 : uint8 zeroed_count,
8227 : uint8 checkfail_count,
8228 : uint8 first_error_off,
8229 : uint8 first_zeroed_off,
8230 : uint8 first_ignored_off)
8231 : {
8232 :
8233 192 : uint8 shift = 0;
8234 192 : uint8 zeroed_or_error_count =
8235 : error_count > 0 ? error_count : zeroed_count;
8236 : uint8 first_off;
8237 :
8238 : StaticAssertDecl(PG_IOV_MAX <= 1 << READV_COUNT_BITS,
8239 : "PG_IOV_MAX is bigger than reserved space for error data");
8240 : StaticAssertDecl((1 + 1 + 3 * READV_COUNT_BITS) <= PGAIO_RESULT_ERROR_BITS,
8241 : "PGAIO_RESULT_ERROR_BITS is insufficient for buffer_readv");
8242 :
8243 : /*
8244 : * We only have space to encode one offset - but luckily that's good
8245 : * enough. If there is an error, the error is the interesting offset, same
8246 : * with a zeroed buffer vs an ignored buffer.
8247 : */
8248 192 : if (error_count > 0)
8249 94 : first_off = first_error_off;
8250 98 : else if (zeroed_count > 0)
8251 80 : first_off = first_zeroed_off;
8252 : else
8253 18 : first_off = first_ignored_off;
8254 :
8255 : Assert(!zeroed_any || error_count == 0);
8256 :
8257 192 : result->error_data = 0;
8258 :
8259 192 : result->error_data |= zeroed_any << shift;
8260 192 : shift += 1;
8261 :
8262 192 : result->error_data |= ignored_any << shift;
8263 192 : shift += 1;
8264 :
8265 192 : result->error_data |= ((uint32) zeroed_or_error_count) << shift;
8266 192 : shift += READV_COUNT_BITS;
8267 :
8268 192 : result->error_data |= ((uint32) checkfail_count) << shift;
8269 192 : shift += READV_COUNT_BITS;
8270 :
8271 192 : result->error_data |= ((uint32) first_off) << shift;
8272 192 : shift += READV_COUNT_BITS;
8273 :
8274 192 : result->id = is_temp ? PGAIO_HCB_LOCAL_BUFFER_READV :
8275 : PGAIO_HCB_SHARED_BUFFER_READV;
8276 :
8277 192 : if (error_count > 0)
8278 94 : result->status = PGAIO_RS_ERROR;
8279 : else
8280 98 : result->status = PGAIO_RS_WARNING;
8281 :
8282 : /*
8283 : * The encoding is complicated enough to warrant cross-checking it against
8284 : * the decode function.
8285 : */
8286 : #ifdef USE_ASSERT_CHECKING
8287 : {
8288 : bool zeroed_any_2,
8289 : ignored_any_2;
8290 : uint8 zeroed_or_error_count_2,
8291 : checkfail_count_2,
8292 : first_off_2;
8293 :
8294 : buffer_readv_decode_error(*result,
8295 : &zeroed_any_2, &ignored_any_2,
8296 : &zeroed_or_error_count_2,
8297 : &checkfail_count_2,
8298 : &first_off_2);
8299 : Assert(zeroed_any == zeroed_any_2);
8300 : Assert(ignored_any == ignored_any_2);
8301 : Assert(zeroed_or_error_count == zeroed_or_error_count_2);
8302 : Assert(checkfail_count == checkfail_count_2);
8303 : Assert(first_off == first_off_2);
8304 : }
8305 : #endif
8306 :
8307 : #undef READV_COUNT_BITS
8308 : #undef READV_COUNT_MASK
8309 192 : }
8310 :
8311 : /*
8312 : * Helper for AIO readv completion callbacks, supporting both shared and temp
8313 : * buffers. Gets called once for each buffer in a multi-page read.
8314 : */
8315 : static pg_attribute_always_inline void
8316 1323552 : buffer_readv_complete_one(PgAioTargetData *td, uint8 buf_off, Buffer buffer,
8317 : uint8 flags, bool failed, bool is_temp,
8318 : bool *buffer_invalid,
8319 : bool *failed_checksum,
8320 : bool *ignored_checksum,
8321 : bool *zeroed_buffer)
8322 : {
8323 1323552 : BufferDesc *buf_hdr = is_temp ?
8324 8409 : GetLocalBufferDescriptor(-buffer - 1)
8325 1323552 : : GetBufferDescriptor(buffer - 1);
8326 1323552 : BufferTag tag = buf_hdr->tag;
8327 1323552 : char *bufdata = BufferGetBlock(buffer);
8328 : uint64 set_flag_bits;
8329 : int piv_flags;
8330 :
8331 : /* check that the buffer is in the expected state for a read */
8332 : #ifdef USE_ASSERT_CHECKING
8333 : {
8334 : uint64 buf_state = pg_atomic_read_u64(&buf_hdr->state);
8335 :
8336 : Assert(buf_state & BM_TAG_VALID);
8337 : Assert(!(buf_state & BM_VALID));
8338 : /* temp buffers don't use BM_IO_IN_PROGRESS */
8339 : if (!is_temp)
8340 : Assert(buf_state & BM_IO_IN_PROGRESS);
8341 : Assert(!(buf_state & BM_DIRTY));
8342 : }
8343 : #endif
8344 :
8345 1323552 : *buffer_invalid = false;
8346 1323552 : *failed_checksum = false;
8347 1323552 : *ignored_checksum = false;
8348 1323552 : *zeroed_buffer = false;
8349 :
8350 : /*
8351 : * We ask PageIsVerified() to only log the message about checksum errors,
8352 : * as the completion might be run in any backend (or IO workers). We will
8353 : * report checksum errors in buffer_readv_report().
8354 : */
8355 1323552 : piv_flags = PIV_LOG_LOG;
8356 :
8357 : /* the local zero_damaged_pages may differ from the definer's */
8358 1323552 : if (flags & READ_BUFFERS_IGNORE_CHECKSUM_FAILURES)
8359 38 : piv_flags |= PIV_IGNORE_CHECKSUM_FAILURE;
8360 :
8361 : /* Check for garbage data. */
8362 1323552 : if (!failed)
8363 : {
8364 : /*
8365 : * If the buffer is not currently pinned by this backend, e.g. because
8366 : * we're completing this IO after an error, the buffer data will have
8367 : * been marked as inaccessible when the buffer was unpinned. The AIO
8368 : * subsystem holds a pin, but that doesn't prevent the buffer from
8369 : * having been marked as inaccessible. The completion might also be
8370 : * executed in a different process.
8371 : */
8372 : #ifdef USE_VALGRIND
8373 : if (!BufferIsPinned(buffer))
8374 : VALGRIND_MAKE_MEM_DEFINED(bufdata, BLCKSZ);
8375 : #endif
8376 :
8377 1323523 : if (!PageIsVerified((Page) bufdata, tag.blockNum, piv_flags,
8378 : failed_checksum))
8379 : {
8380 96 : if (flags & READ_BUFFERS_ZERO_ON_ERROR)
8381 : {
8382 46 : memset(bufdata, 0, BLCKSZ);
8383 46 : *zeroed_buffer = true;
8384 : }
8385 : else
8386 : {
8387 50 : *buffer_invalid = true;
8388 : /* mark buffer as having failed */
8389 50 : failed = true;
8390 : }
8391 : }
8392 1323427 : else if (*failed_checksum)
8393 12 : *ignored_checksum = true;
8394 :
8395 : /* undo what we did above */
8396 : #ifdef USE_VALGRIND
8397 : if (!BufferIsPinned(buffer))
8398 : VALGRIND_MAKE_MEM_NOACCESS(bufdata, BLCKSZ);
8399 : #endif
8400 :
8401 : /*
8402 : * Immediately log a message about the invalid page, but only to the
8403 : * server log. The reason to do so immediately is that this may be
8404 : * executed in a different backend than the one that originated the
8405 : * request. The reason to do so immediately is that the originator
8406 : * might not process the query result immediately (because it is busy
8407 : * doing another part of query processing) or at all (e.g. if it was
8408 : * cancelled or errored out due to another IO also failing). The
8409 : * definer of the IO will emit an ERROR or WARNING when processing the
8410 : * IO's results
8411 : *
8412 : * To avoid duplicating the code to emit these log messages, we reuse
8413 : * buffer_readv_report().
8414 : */
8415 1323523 : if (*buffer_invalid || *failed_checksum || *zeroed_buffer)
8416 : {
8417 108 : PgAioResult result_one = {0};
8418 :
8419 108 : buffer_readv_encode_error(&result_one, is_temp,
8420 108 : *zeroed_buffer,
8421 108 : *ignored_checksum,
8422 108 : *buffer_invalid,
8423 108 : *zeroed_buffer ? 1 : 0,
8424 108 : *failed_checksum ? 1 : 0,
8425 : buf_off, buf_off, buf_off);
8426 108 : pgaio_result_report(result_one, td, LOG_SERVER_ONLY);
8427 : }
8428 : }
8429 :
8430 : /* Terminate I/O and set BM_VALID. */
8431 1323552 : set_flag_bits = failed ? BM_IO_ERROR : BM_VALID;
8432 1323552 : if (is_temp)
8433 8409 : TerminateLocalBufferIO(buf_hdr, false, set_flag_bits, true);
8434 : else
8435 1315143 : TerminateBufferIO(buf_hdr, false, set_flag_bits, false, true);
8436 :
8437 : /*
8438 : * Call the BUFFER_READ_DONE tracepoint in the callback, even though the
8439 : * callback may not be executed in the same backend that called
8440 : * BUFFER_READ_START. The alternative would be to defer calling the
8441 : * tracepoint to a later point (e.g. the local completion callback for
8442 : * shared buffer reads), which seems even less helpful.
8443 : */
8444 : TRACE_POSTGRESQL_BUFFER_READ_DONE(tag.forkNum,
8445 : tag.blockNum,
8446 : tag.spcOid,
8447 : tag.dbOid,
8448 : tag.relNumber,
8449 : is_temp ? MyProcNumber : INVALID_PROC_NUMBER,
8450 : false);
8451 1323552 : }
8452 :
8453 : /*
8454 : * Perform completion handling of a single AIO read. This read may cover
8455 : * multiple blocks / buffers.
8456 : *
8457 : * Shared between shared and local buffers, to reduce code duplication.
8458 : */
8459 : static pg_attribute_always_inline PgAioResult
8460 1184607 : buffer_readv_complete(PgAioHandle *ioh, PgAioResult prior_result,
8461 : uint8 cb_data, bool is_temp)
8462 : {
8463 1184607 : PgAioResult result = prior_result;
8464 1184607 : PgAioTargetData *td = pgaio_io_get_target_data(ioh);
8465 1184607 : uint8 first_error_off = 0;
8466 1184607 : uint8 first_zeroed_off = 0;
8467 1184607 : uint8 first_ignored_off = 0;
8468 1184607 : uint8 error_count = 0;
8469 1184607 : uint8 zeroed_count = 0;
8470 1184607 : uint8 ignored_count = 0;
8471 1184607 : uint8 checkfail_count = 0;
8472 : uint64 *io_data;
8473 : uint8 handle_data_len;
8474 :
8475 : if (is_temp)
8476 : {
8477 : Assert(td->smgr.is_temp);
8478 : Assert(pgaio_io_get_owner(ioh) == MyProcNumber);
8479 : }
8480 : else
8481 : Assert(!td->smgr.is_temp);
8482 :
8483 : /*
8484 : * Iterate over all the buffers affected by this IO and call the
8485 : * per-buffer completion function for each buffer.
8486 : */
8487 1184607 : io_data = pgaio_io_get_handle_data(ioh, &handle_data_len);
8488 2508159 : for (uint8 buf_off = 0; buf_off < handle_data_len; buf_off++)
8489 : {
8490 1323552 : Buffer buf = io_data[buf_off];
8491 : bool failed;
8492 1323552 : bool failed_verification = false;
8493 1323552 : bool failed_checksum = false;
8494 1323552 : bool zeroed_buffer = false;
8495 1323552 : bool ignored_checksum = false;
8496 :
8497 : Assert(BufferIsValid(buf));
8498 :
8499 : /*
8500 : * If the entire I/O failed on a lower-level, each buffer needs to be
8501 : * marked as failed. In case of a partial read, the first few buffers
8502 : * may be ok.
8503 : */
8504 1323552 : failed =
8505 1323552 : prior_result.status == PGAIO_RS_ERROR
8506 1323552 : || prior_result.result <= buf_off;
8507 :
8508 1323552 : buffer_readv_complete_one(td, buf_off, buf, cb_data, failed, is_temp,
8509 : &failed_verification,
8510 : &failed_checksum,
8511 : &ignored_checksum,
8512 : &zeroed_buffer);
8513 :
8514 : /*
8515 : * Track information about the number of different kinds of error
8516 : * conditions across all pages, as there can be multiple pages failing
8517 : * verification as part of one IO.
8518 : */
8519 1323552 : if (failed_verification && !zeroed_buffer && error_count++ == 0)
8520 44 : first_error_off = buf_off;
8521 1323552 : if (zeroed_buffer && zeroed_count++ == 0)
8522 34 : first_zeroed_off = buf_off;
8523 1323552 : if (ignored_checksum && ignored_count++ == 0)
8524 10 : first_ignored_off = buf_off;
8525 1323552 : if (failed_checksum)
8526 32 : checkfail_count++;
8527 : }
8528 :
8529 : /*
8530 : * If the smgr read succeeded [partially] and page verification failed for
8531 : * some of the pages, adjust the IO's result state appropriately.
8532 : */
8533 1184607 : if (prior_result.status != PGAIO_RS_ERROR &&
8534 1184554 : (error_count > 0 || ignored_count > 0 || zeroed_count > 0))
8535 : {
8536 84 : buffer_readv_encode_error(&result, is_temp,
8537 : zeroed_count > 0, ignored_count > 0,
8538 : error_count, zeroed_count, checkfail_count,
8539 : first_error_off, first_zeroed_off,
8540 : first_ignored_off);
8541 84 : pgaio_result_report(result, td, DEBUG1);
8542 : }
8543 :
8544 : /*
8545 : * For shared relations this reporting is done in
8546 : * shared_buffer_readv_complete_local().
8547 : */
8548 1184607 : if (is_temp && checkfail_count > 0)
8549 2 : pgstat_report_checksum_failures_in_db(td->smgr.rlocator.dbOid,
8550 : checkfail_count);
8551 :
8552 1184607 : return result;
8553 : }
8554 :
8555 : /*
8556 : * AIO error reporting callback for aio_shared_buffer_readv_cb and
8557 : * aio_local_buffer_readv_cb.
8558 : *
8559 : * The error is encoded / decoded in buffer_readv_encode_error() /
8560 : * buffer_readv_decode_error().
8561 : */
8562 : static void
8563 272 : buffer_readv_report(PgAioResult result, const PgAioTargetData *td,
8564 : int elevel)
8565 : {
8566 272 : int nblocks = td->smgr.nblocks;
8567 272 : BlockNumber first = td->smgr.blockNum;
8568 272 : BlockNumber last = first + nblocks - 1;
8569 272 : ProcNumber errProc =
8570 272 : td->smgr.is_temp ? MyProcNumber : INVALID_PROC_NUMBER;
8571 : RelPathStr rpath =
8572 272 : relpathbackend(td->smgr.rlocator, errProc, td->smgr.forkNum);
8573 : bool zeroed_any,
8574 : ignored_any;
8575 : uint8 zeroed_or_error_count,
8576 : checkfail_count,
8577 : first_off;
8578 : uint8 affected_count;
8579 : const char *msg_one,
8580 : *msg_mult,
8581 : *det_mult,
8582 : *hint_mult;
8583 :
8584 272 : buffer_readv_decode_error(result, &zeroed_any, &ignored_any,
8585 : &zeroed_or_error_count,
8586 : &checkfail_count,
8587 : &first_off);
8588 :
8589 : /*
8590 : * Treat a read that had both zeroed buffers *and* ignored checksums as a
8591 : * special case, it's too irregular to be emitted the same way as the
8592 : * other cases.
8593 : */
8594 272 : if (zeroed_any && ignored_any)
8595 : {
8596 : Assert(zeroed_any && ignored_any);
8597 : Assert(nblocks > 1); /* same block can't be both zeroed and ignored */
8598 : Assert(result.status != PGAIO_RS_ERROR);
8599 4 : affected_count = zeroed_or_error_count;
8600 :
8601 4 : ereport(elevel,
8602 : errcode(ERRCODE_DATA_CORRUPTED),
8603 : errmsg("zeroing %u page(s) and ignoring %u checksum failure(s) among blocks %u..%u of relation \"%s\"",
8604 : affected_count, checkfail_count, first, last, rpath.str),
8605 : affected_count > 1 ?
8606 : errdetail("Block %u held the first zeroed page.",
8607 : first + first_off) : 0,
8608 : errhint_plural("See server log for details about the other %d invalid block.",
8609 : "See server log for details about the other %d invalid blocks.",
8610 : affected_count + checkfail_count - 1,
8611 : affected_count + checkfail_count - 1));
8612 4 : return;
8613 : }
8614 :
8615 : /*
8616 : * The other messages are highly repetitive. To avoid duplicating a long
8617 : * and complicated ereport(), gather the translated format strings
8618 : * separately and then do one common ereport.
8619 : */
8620 268 : if (result.status == PGAIO_RS_ERROR)
8621 : {
8622 : Assert(!zeroed_any); /* can't have invalid pages when zeroing them */
8623 136 : affected_count = zeroed_or_error_count;
8624 136 : msg_one = _("invalid page in block %u of relation \"%s\"");
8625 136 : msg_mult = _("%u invalid pages among blocks %u..%u of relation \"%s\"");
8626 136 : det_mult = _("Block %u held the first invalid page.");
8627 136 : hint_mult = _("See server log for the other %u invalid block(s).");
8628 : }
8629 132 : else if (zeroed_any && !ignored_any)
8630 : {
8631 108 : affected_count = zeroed_or_error_count;
8632 108 : msg_one = _("invalid page in block %u of relation \"%s\"; zeroing out page");
8633 108 : msg_mult = _("zeroing out %u invalid pages among blocks %u..%u of relation \"%s\"");
8634 108 : det_mult = _("Block %u held the first zeroed page.");
8635 108 : hint_mult = _("See server log for the other %u zeroed block(s).");
8636 : }
8637 24 : else if (!zeroed_any && ignored_any)
8638 : {
8639 24 : affected_count = checkfail_count;
8640 24 : msg_one = _("ignoring checksum failure in block %u of relation \"%s\"");
8641 24 : msg_mult = _("ignoring %u checksum failures among blocks %u..%u of relation \"%s\"");
8642 24 : det_mult = _("Block %u held the first ignored page.");
8643 24 : hint_mult = _("See server log for the other %u ignored block(s).");
8644 : }
8645 : else
8646 0 : pg_unreachable();
8647 :
8648 268 : ereport(elevel,
8649 : errcode(ERRCODE_DATA_CORRUPTED),
8650 : affected_count == 1 ?
8651 : errmsg_internal(msg_one, first + first_off, rpath.str) :
8652 : errmsg_internal(msg_mult, affected_count, first, last, rpath.str),
8653 : affected_count > 1 ? errdetail_internal(det_mult, first + first_off) : 0,
8654 : affected_count > 1 ? errhint_internal(hint_mult, affected_count - 1) : 0);
8655 : }
8656 :
/* Staging callback for shared-buffer reads (is_write=false, is_temp=false). */
static void
shared_buffer_readv_stage(PgAioHandle *ioh, uint8 cb_data)
{
	buffer_stage_common(ioh, false, false);
}
8662 :
/* Shared completion callback for shared-buffer reads; may run in any backend. */
static PgAioResult
shared_buffer_readv_complete(PgAioHandle *ioh, PgAioResult prior_result,
							 uint8 cb_data)
{
	return buffer_readv_complete(ioh, prior_result, cb_data, false);
}
8669 :
8670 : /*
8671 : * We need a backend-local completion callback for shared buffers, to be able
8672 : * to report checksum errors correctly. Unfortunately that can only safely
8673 : * happen if the reporting backend has previously called
8674 : * pgstat_prepare_report_checksum_failure(), which we can only guarantee in
8675 : * the backend that started the IO. Hence this callback.
8676 : */
8677 : static PgAioResult
8678 1315979 : shared_buffer_readv_complete_local(PgAioHandle *ioh, PgAioResult prior_result,
8679 : uint8 cb_data)
8680 : {
8681 : bool zeroed_any,
8682 : ignored_any;
8683 : uint8 zeroed_or_error_count,
8684 : checkfail_count,
8685 : first_off;
8686 :
8687 1315979 : if (prior_result.status == PGAIO_RS_OK)
8688 1315902 : return prior_result;
8689 :
8690 77 : buffer_readv_decode_error(prior_result,
8691 : &zeroed_any,
8692 : &ignored_any,
8693 : &zeroed_or_error_count,
8694 : &checkfail_count,
8695 : &first_off);
8696 :
8697 77 : if (checkfail_count)
8698 : {
8699 24 : PgAioTargetData *td = pgaio_io_get_target_data(ioh);
8700 :
8701 24 : pgstat_report_checksum_failures_in_db(td->smgr.rlocator.dbOid,
8702 : checkfail_count);
8703 : }
8704 :
8705 77 : return prior_result;
8706 : }
8707 :
/* Staging callback for temp-buffer reads (is_write=false, is_temp=true). */
static void
local_buffer_readv_stage(PgAioHandle *ioh, uint8 cb_data)
{
	buffer_stage_common(ioh, false, true);
}
8713 :
/* Completion callback for temp-buffer reads; runs in the issuing backend. */
static PgAioResult
local_buffer_readv_complete(PgAioHandle *ioh, PgAioResult prior_result,
							uint8 cb_data)
{
	return buffer_readv_complete(ioh, prior_result, cb_data, true);
}
8720 :
/* readv callback is passed READ_BUFFERS_* flags as callback data */
const PgAioHandleCallbacks aio_shared_buffer_readv_cb = {
	.stage = shared_buffer_readv_stage,
	.complete_shared = shared_buffer_readv_complete,
	/* need a local callback to report checksum failures */
	.complete_local = shared_buffer_readv_complete_local,
	.report = buffer_readv_report,
};
8729 :
/* readv callback is passed READ_BUFFERS_* flags as callback data */
const PgAioHandleCallbacks aio_local_buffer_readv_cb = {
	.stage = local_buffer_readv_stage,

	/*
	 * Note that this, in contrast to the shared_buffers case, uses
	 * complete_local, as only the issuing backend has access to the required
	 * datastructures. This is important in case the IO completion may be
	 * consumed incidentally by another backend.
	 */
	.complete_local = local_buffer_readv_complete,
	.report = buffer_readv_report,
};
|