1 : /*-------------------------------------------------------------------------
2 : *
3 : * md.c
4 : * This code manages relations that reside on magnetic disk.
5 : *
6 : * Or at least, that was what the Berkeley folk had in mind when they named
7 : * this file. In reality, what this code provides is an interface from
8 : * the smgr API to Unix-like filesystem APIs, so it will work with any type
9 : * of device for which the operating system provides filesystem support.
10 : * It doesn't matter whether the bits are on spinning rust or some other
11 : * storage technology.
12 : *
13 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
14 : * Portions Copyright (c) 1994, Regents of the University of California
15 : *
16 : *
17 : * IDENTIFICATION
18 : * src/backend/storage/smgr/md.c
19 : *
20 : *-------------------------------------------------------------------------
21 : */
22 : #include "postgres.h"
23 :
24 : #include <unistd.h>
25 : #include <fcntl.h>
26 : #include <sys/file.h>
27 :
28 : #include "access/xlogutils.h"
29 : #include "commands/tablespace.h"
30 : #include "common/file_utils.h"
31 : #include "miscadmin.h"
32 : #include "pg_trace.h"
33 : #include "pgstat.h"
34 : #include "storage/aio.h"
35 : #include "storage/bufmgr.h"
36 : #include "storage/fd.h"
37 : #include "storage/md.h"
38 : #include "storage/relfilelocator.h"
39 : #include "storage/smgr.h"
40 : #include "storage/sync.h"
41 : #include "utils/memutils.h"
42 :
43 : /*
44 : * The magnetic disk storage manager keeps track of open file
45 : * descriptors in its own descriptor pool. This is done to make it
46 : * easier to support relations that are larger than the operating
47 : * system's file size limit (often 2GBytes). In order to do that,
48 : * we break relations up into "segment" files that are each shorter than
49 : * the OS file size limit. The segment size is set by the RELSEG_SIZE
50 : * configuration constant in pg_config.h.
51 : *
52 : * On disk, a relation must consist of consecutively numbered segment
53 : * files in the pattern
54 : * -- Zero or more full segments of exactly RELSEG_SIZE blocks each
55 : * -- Exactly one partial segment of size 0 <= size < RELSEG_SIZE blocks
56 : * -- Optionally, any number of inactive segments of size 0 blocks.
57 : * The full and partial segments are collectively the "active" segments.
58 : * Inactive segments are those that once contained data but are currently
59 : * not needed because of an mdtruncate() operation. The reason for leaving
60 : * them present at size zero, rather than unlinking them, is that other
61 : * backends and/or the checkpointer might be holding open file references to
62 : * such segments. If the relation expands again after mdtruncate(), such
63 : * that a deactivated segment becomes active again, it is important that
64 : * such file references still be valid --- else data might get written
65 : * out to an unlinked old copy of a segment file that will eventually
66 : * disappear.
67 : *
68 : * File descriptors are stored in the per-fork md_seg_fds arrays inside
69 : * SMgrRelation. The length of these arrays is stored in md_num_open_segs.
70 : * Note that a fork's md_num_open_segs having a specific value does not
71 : * necessarily mean the relation doesn't have additional segments; we may
72 : * just not have opened the next segment yet. (We could not have "all
73 : * segments are in the array" as an invariant anyway, since another backend
74 : * could extend the relation while we aren't looking.) We do not have
75 : * entries for inactive segments, however; as soon as we find a partial
76 : * segment, we assume that any subsequent segments are inactive.
77 : *
78 : * The entire MdfdVec array is palloc'd in the MdCxt memory context.
79 : */
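/*
 * Worked example, assuming the default BLCKSZ of 8192 bytes and the default
 * RELSEG_SIZE of 131072 blocks (1GB segments): block number N is stored in
 * segment file number N / RELSEG_SIZE, at byte offset
 * (N % RELSEG_SIZE) * BLCKSZ within that file; so block 131073 is block 1 of
 * segment 1, at byte offset 8192.
 */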
80 :
81 : typedef struct _MdfdVec
82 : {
83 : File mdfd_vfd; /* fd number in fd.c's pool */
84 : BlockNumber mdfd_segno; /* segment number, from 0 */
85 : } MdfdVec;
86 :
87 : static MemoryContext MdCxt; /* context for all MdfdVec objects */
88 :
89 :
90 : /* Populate a file tag describing an md.c segment file. */
91 : #define INIT_MD_FILETAG(a,xx_rlocator,xx_forknum,xx_segno) \
92 : ( \
93 : memset(&(a), 0, sizeof(FileTag)), \
94 : (a).handler = SYNC_HANDLER_MD, \
95 : (a).rlocator = (xx_rlocator), \
96 : (a).forknum = (xx_forknum), \
97 : (a).segno = (xx_segno) \
98 : )
99 :
100 :
101 : /*** behavior for mdopen & _mdfd_getseg ***/
102 : /* ereport if segment not present */
103 : #define EXTENSION_FAIL (1 << 0)
104 : /* return NULL if segment not present */
105 : #define EXTENSION_RETURN_NULL (1 << 1)
106 : /* create new segments as needed */
107 : #define EXTENSION_CREATE (1 << 2)
108 : /* create new segments if needed during recovery */
109 : #define EXTENSION_CREATE_RECOVERY (1 << 3)
110 : /* don't try to open a segment if it's not already open */
111 : #define EXTENSION_DONT_OPEN (1 << 5)
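/*
 * These values form a bitmask; callers OR them together when more than one
 * behavior applies, e.g. EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY in the
 * read and write paths below.
 */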
112 :
113 :
114 : /*
115 : * Fixed-length string to represent paths to files that need to be built by
116 : * md.c.
117 : *
118 : * The maximum number of segments is MaxBlockNumber / RELSEG_SIZE, where
119 : * RELSEG_SIZE can be set to 1 (for testing only).
120 : */
121 : #define SEGMENT_CHARS OIDCHARS
122 : #define MD_PATH_STR_MAXLEN \
123 : (\
124 : REL_PATH_STR_MAXLEN \
125 : + sizeof((char)'.') \
126 : + SEGMENT_CHARS \
127 : )
128 : typedef struct MdPathStr
129 : {
130 : char str[MD_PATH_STR_MAXLEN + 1];
131 : } MdPathStr;
132 :
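/*
 * For example (with hypothetical OIDs), a relation stored at
 * base/16384/16385 keeps its second segment in base/16384/16385.1; segment
 * zero carries no ".segno" suffix.
 */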
133 :
134 : /* local routines */
135 : static void mdunlinkfork(RelFileLocatorBackend rlocator, ForkNumber forknum,
136 : bool isRedo);
137 : static MdfdVec *mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior);
138 : static void register_dirty_segment(SMgrRelation reln, ForkNumber forknum,
139 : MdfdVec *seg);
140 : static void register_unlink_segment(RelFileLocatorBackend rlocator, ForkNumber forknum,
141 : BlockNumber segno);
142 : static void register_forget_request(RelFileLocatorBackend rlocator, ForkNumber forknum,
143 : BlockNumber segno);
144 : static void _fdvec_resize(SMgrRelation reln,
145 : ForkNumber forknum,
146 : int nseg);
147 : static MdPathStr _mdfd_segpath(SMgrRelation reln, ForkNumber forknum,
148 : BlockNumber segno);
149 : static MdfdVec *_mdfd_openseg(SMgrRelation reln, ForkNumber forknum,
150 : BlockNumber segno, int oflags);
151 : static MdfdVec *_mdfd_getseg(SMgrRelation reln, ForkNumber forknum,
152 : BlockNumber blkno, bool skipFsync, int behavior);
153 : static BlockNumber _mdnblocks(SMgrRelation reln, ForkNumber forknum,
154 : MdfdVec *seg);
155 :
156 : static PgAioResult md_readv_complete(PgAioHandle *ioh, PgAioResult prior_result, uint8 cb_data);
157 : static void md_readv_report(PgAioResult result, const PgAioTargetData *td, int elevel);
158 :
159 : const PgAioHandleCallbacks aio_md_readv_cb = {
160 : .complete_shared = md_readv_complete,
161 : .report = md_readv_report,
162 : };
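/*
 * aio_md_readv_cb supplies the completion and error-reporting callbacks that
 * the AIO subsystem runs for reads started by mdstartreadv(), which registers
 * them under PGAIO_HCB_MD_READV.
 */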
163 :
164 :
165 : static inline int
166 2812944 : _mdfd_open_flags(void)
167 : {
168 2812944 : int flags = O_RDWR | PG_BINARY;
169 :
170 2812944 : if (io_direct_flags & IO_DIRECT_DATA)
171 622 : flags |= PG_O_DIRECT;
172 :
173 2812944 : return flags;
174 : }
175 :
176 : /*
177 : * mdinit() -- Initialize private state for magnetic disk storage manager.
178 : */
179 : void
180 43174 : mdinit(void)
181 : {
182 43174 : MdCxt = AllocSetContextCreate(TopMemoryContext,
183 : "MdSmgr",
184 : ALLOCSET_DEFAULT_SIZES);
185 43174 : }
186 :
187 : /*
188 : * mdexists() -- Does the physical file exist?
189 : *
190 : * Note: this will return true for lingering files, with pending deletions
191 : */
192 : bool
193 1170728 : mdexists(SMgrRelation reln, ForkNumber forknum)
194 : {
195 : /*
196 : * Close it first, to ensure that we notice if the fork has been unlinked
197 : * since we opened it. As an optimization, we can skip that in recovery,
198 : * which already closes relations when dropping them.
199 : */
200 1170728 : if (!InRecovery)
201 1129320 : mdclose(reln, forknum);
202 :
203 1170728 : return (mdopenfork(reln, forknum, EXTENSION_RETURN_NULL) != NULL);
204 : }
205 :
206 : /*
207 : * mdcreate() -- Create a new relation on magnetic disk.
208 : *
209 : * If isRedo is true, it's okay for the relation to exist already.
210 : */
211 : void
212 11233076 : mdcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
213 : {
214 : MdfdVec *mdfd;
215 : RelPathStr path;
216 : File fd;
217 :
218 11233076 : if (isRedo && reln->md_num_open_segs[forknum] > 0)
219 10913004 : return; /* created and opened already... */
220 :
221 : Assert(reln->md_num_open_segs[forknum] == 0);
222 :
223 : /*
224 : * We may be using the target table space for the first time in this
225 : * database, so create a per-database subdirectory if needed.
226 : *
227 : * XXX this is a fairly ugly violation of module layering, but this seems
228 : * to be the best place to put the check. Maybe TablespaceCreateDbspace
229 : * should be here and not in commands/tablespace.c? But that would imply
230 : * importing a lot of stuff that smgr.c oughtn't know, either.
231 : */
232 320072 : TablespaceCreateDbspace(reln->smgr_rlocator.locator.spcOid,
233 : reln->smgr_rlocator.locator.dbOid,
234 : isRedo);
235 :
236 320072 : path = relpath(reln->smgr_rlocator, forknum);
237 :
238 320072 : fd = PathNameOpenFile(path.str, _mdfd_open_flags() | O_CREAT | O_EXCL);
239 :
240 320072 : if (fd < 0)
241 : {
242 8838 : int save_errno = errno;
243 :
244 8838 : if (isRedo)
245 8838 : fd = PathNameOpenFile(path.str, _mdfd_open_flags());
246 8838 : if (fd < 0)
247 : {
248 : /* be sure to report the error reported by create, not open */
249 0 : errno = save_errno;
250 0 : ereport(ERROR,
251 : (errcode_for_file_access(),
252 : errmsg("could not create file \"%s\": %m", path.str)));
253 : }
254 : }
255 :
256 320072 : _fdvec_resize(reln, forknum, 1);
257 320072 : mdfd = &reln->md_seg_fds[forknum][0];
258 320072 : mdfd->mdfd_vfd = fd;
259 320072 : mdfd->mdfd_segno = 0;
260 :
261 320072 : if (!SmgrIsTemp(reln))
262 313548 : register_dirty_segment(reln, forknum, mdfd);
263 : }
264 :
265 : /*
266 : * mdunlink() -- Unlink a relation.
267 : *
268 : * Note that we're passed a RelFileLocatorBackend --- by the time this is called,
269 : * there won't be an SMgrRelation hashtable entry anymore.
270 : *
271 : * forknum can be a fork number to delete a specific fork, or InvalidForkNumber
272 : * to delete all forks.
273 : *
274 : * For regular relations, we don't unlink the first segment file of the rel,
275 : * but just truncate it to zero length, and record a request to unlink it after
276 : * the next checkpoint. Additional segments can be unlinked immediately,
277 : * however. Leaving the empty file in place prevents that relfilenumber
278 : * from being reused. The scenario this protects us from is:
279 : * 1. We delete a relation (and commit, and actually remove its file).
280 : * 2. We create a new relation, which by chance gets the same relfilenumber as
281 : * the just-deleted one (OIDs must've wrapped around for that to happen).
282 : * 3. We crash before another checkpoint occurs.
283 : * During replay, we would delete the file and then recreate it, which is fine
284 : * if the contents of the file were repopulated by subsequent WAL entries.
285 : * But if we didn't WAL-log insertions, but instead relied on fsyncing the
286 : * file after populating it (as we do at wal_level=minimal), the contents of
287 : * the file would be lost forever. By leaving the empty file until after the
288 : * next checkpoint, we prevent reassignment of the relfilenumber until it's
289 : * safe, because relfilenumber assignment skips over any existing file.
290 : *
291 : * Additional segments, if any, are truncated and then unlinked. The reason
292 : * for truncating is that other backends may still hold open FDs for these at
293 : * the smgr level, so that the kernel can't remove the file yet. We want to
294 : * reclaim the disk space right away despite that.
295 : *
296 : * We do not need to go through this dance for temp relations, though, because
297 : * we never make WAL entries for temp rels, and so a temp rel poses no threat
298 : * to the health of a regular rel that has taken over its relfilenumber.
299 : * The fact that temp rels and regular rels have different file naming
300 : * patterns provides additional safety. Other backends shouldn't have open
301 : * FDs for them, either.
302 : *
303 : * We also don't do it while performing a binary upgrade. There is no reuse
304 : * hazard in that case, since after a crash or even a simple ERROR, the
305 : * upgrade fails and the whole cluster must be recreated from scratch.
306 : * Furthermore, it is important to remove the files from disk immediately,
307 : * because we may be about to reuse the same relfilenumber.
308 : *
309 : * All the above applies only to the relation's main fork; other forks can
310 : * just be removed immediately, since they are not needed to prevent the
311 : * relfilenumber from being recycled. Also, we do not carefully
312 : * track whether other forks have been created or not, but just attempt to
313 : * unlink them unconditionally; so we should never complain about ENOENT.
314 : *
315 : * If isRedo is true, it's unsurprising for the relation to be already gone.
316 : * Also, we should remove the file immediately instead of queuing a request
317 : * for later, since during redo there's no possibility of creating a
318 : * conflicting relation.
319 : *
320 : * Note: we currently just never warn about ENOENT at all. We could warn in
321 : * the main-fork, non-isRedo case, but it doesn't seem worth the trouble.
322 : *
323 : * Note: any failure should be reported as WARNING not ERROR, because
324 : * we are usually not in a transaction anymore when this is called.
325 : */
326 : void
327 370504 : mdunlink(RelFileLocatorBackend rlocator, ForkNumber forknum, bool isRedo)
328 : {
329 : /* Now do the per-fork work */
330 370504 : if (forknum == InvalidForkNumber)
331 : {
332 0 : for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
333 0 : mdunlinkfork(rlocator, forknum, isRedo);
334 : }
335 : else
336 370504 : mdunlinkfork(rlocator, forknum, isRedo);
337 370504 : }
338 :
339 : /*
340 : * Truncate a file to release disk space.
341 : */
342 : static int
343 435834 : do_truncate(const char *path)
344 : {
345 : int save_errno;
346 : int ret;
347 :
348 435834 : ret = pg_truncate(path, 0);
349 :
350 : /* Log a warning here to avoid repetition in callers. */
351 435834 : if (ret < 0 && errno != ENOENT)
352 : {
353 0 : save_errno = errno;
354 0 : ereport(WARNING,
355 : (errcode_for_file_access(),
356 : errmsg("could not truncate file \"%s\": %m", path)));
357 0 : errno = save_errno;
358 : }
359 :
360 435834 : return ret;
361 : }
362 :
363 : static void
364 370504 : mdunlinkfork(RelFileLocatorBackend rlocator, ForkNumber forknum, bool isRedo)
365 : {
366 : RelPathStr path;
367 : int ret;
368 : int save_errno;
369 :
370 370504 : path = relpath(rlocator, forknum);
371 :
372 : /*
373 : * Truncate and then unlink the first segment, or just register a request
374 : * to unlink it later, as described in the comments for mdunlink().
375 : */
376 370504 : if (isRedo || IsBinaryUpgrade || forknum != MAIN_FORKNUM ||
377 77584 : RelFileLocatorBackendIsTemp(rlocator))
378 : {
379 299114 : if (!RelFileLocatorBackendIsTemp(rlocator))
380 : {
381 : /* Prevent other backends' fds from holding on to the disk space */
382 274338 : ret = do_truncate(path.str);
383 :
384 : /* Forget any pending sync requests for the first segment */
385 274338 : save_errno = errno;
386 274338 : register_forget_request(rlocator, forknum, 0 /* first seg */ );
387 274338 : errno = save_errno;
388 : }
389 : else
390 24776 : ret = 0;
391 :
392 : /* Next unlink the file, unless it was already found to be missing */
393 299114 : if (ret >= 0 || errno != ENOENT)
394 : {
395 43500 : ret = unlink(path.str);
396 43500 : if (ret < 0 && errno != ENOENT)
397 : {
398 0 : save_errno = errno;
399 0 : ereport(WARNING,
400 : (errcode_for_file_access(),
401 : errmsg("could not remove file \"%s\": %m", path.str)));
402 0 : errno = save_errno;
403 : }
404 : }
405 : }
406 : else
407 : {
408 : /* Prevent other backends' fds from holding on to the disk space */
409 71390 : ret = do_truncate(path.str);
410 :
411 : /* Register request to unlink first segment later */
412 71390 : save_errno = errno;
413 71390 : register_unlink_segment(rlocator, forknum, 0 /* first seg */ );
414 71390 : errno = save_errno;
415 : }
416 :
417 : /*
418 : * Delete any additional segments.
419 : *
420 : * Note that because we loop until getting ENOENT, we will correctly
421 : * remove all inactive segments as well as active ones. Ideally we'd
422 : * continue the loop until getting exactly that errno, but that risks an
423 : * infinite loop if the problem is directory-wide (for instance, if we
424 : * suddenly can't read the data directory itself). We compromise by
425 : * continuing after a non-ENOENT truncate error, but stopping after any
426 : * unlink error. If there is indeed a directory-wide problem, additional
427 : * unlink attempts wouldn't work anyway.
428 : */
429 370504 : if (ret >= 0 || errno != ENOENT)
430 : {
431 : MdPathStr segpath;
432 : BlockNumber segno;
433 :
434 96630 : for (segno = 1;; segno++)
435 : {
436 96630 : sprintf(segpath.str, "%s.%u", path.str, segno);
437 :
438 96630 : if (!RelFileLocatorBackendIsTemp(rlocator))
439 : {
440 : /*
441 : * Prevent other backends' fds from holding on to the disk
442 : * space. We're done if we see ENOENT, though.
443 : */
444 90106 : if (do_truncate(segpath.str) < 0 && errno == ENOENT)
445 90106 : break;
446 :
447 : /*
448 : * Forget any pending sync requests for this segment before we
449 : * try to unlink.
450 : */
451 0 : register_forget_request(rlocator, forknum, segno);
452 : }
453 :
454 6524 : if (unlink(segpath.str) < 0)
455 : {
456 : /* ENOENT is expected after the last segment... */
457 6524 : if (errno != ENOENT)
458 0 : ereport(WARNING,
459 : (errcode_for_file_access(),
460 : errmsg("could not remove file \"%s\": %m", segpath.str)));
461 6524 : break;
462 : }
463 : }
464 : }
465 370504 : }
466 :
467 : /*
468 : * mdextend() -- Add a block to the specified relation.
469 : *
470 : * The semantics are nearly the same as mdwrite(): write at the
471 : * specified position. However, this is to be used for the case of
472 : * extending a relation (i.e., blocknum is at or beyond the current
473 : * EOF). Note that we assume writing a block beyond current EOF
474 : * causes intervening file space to become filled with zeroes.
475 : */
476 : void
477 240658 : mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
478 : const void *buffer, bool skipFsync)
479 : {
480 : off_t seekpos;
481 : int nbytes;
482 : MdfdVec *v;
483 :
484 : /* If this build supports direct I/O, the buffer must be I/O aligned. */
485 : if (PG_O_DIRECT != 0 && PG_IO_ALIGN_SIZE <= BLCKSZ)
486 : Assert((uintptr_t) buffer == TYPEALIGN(PG_IO_ALIGN_SIZE, buffer));
487 :
488 : /* This assert is too expensive to have on normally ... */
489 : #ifdef CHECK_WRITE_VS_EXTEND
490 : Assert(blocknum >= mdnblocks(reln, forknum));
491 : #endif
492 :
493 : /*
494 : * If a relation manages to grow to 2^32-1 blocks, refuse to extend it any
495 : * more --- we mustn't create a block whose number actually is
496 : * InvalidBlockNumber. (Note that this failure should be unreachable
497 : * because of upstream checks in bufmgr.c.)
498 : */
499 240658 : if (blocknum == InvalidBlockNumber)
500 0 : ereport(ERROR,
501 : (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
502 : errmsg("cannot extend file \"%s\" beyond %u blocks",
503 : relpath(reln->smgr_rlocator, forknum).str,
504 : InvalidBlockNumber)));
505 :
506 240658 : v = _mdfd_getseg(reln, forknum, blocknum, skipFsync, EXTENSION_CREATE);
507 :
508 240658 : seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
509 :
510 : Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
511 :
512 240658 : if ((nbytes = FileWrite(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_EXTEND)) != BLCKSZ)
513 : {
514 0 : if (nbytes < 0)
515 0 : ereport(ERROR,
516 : (errcode_for_file_access(),
517 : errmsg("could not extend file \"%s\": %m",
518 : FilePathName(v->mdfd_vfd)),
519 : errhint("Check free disk space.")));
520 : /* short write: complain appropriately */
521 0 : ereport(ERROR,
522 : (errcode(ERRCODE_DISK_FULL),
523 : errmsg("could not extend file \"%s\": wrote only %d of %d bytes at block %u",
524 : FilePathName(v->mdfd_vfd),
525 : nbytes, BLCKSZ, blocknum),
526 : errhint("Check free disk space.")));
527 : }
528 :
529 240658 : if (!skipFsync && !SmgrIsTemp(reln))
530 52 : register_dirty_segment(reln, forknum, v);
531 :
532 : Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE));
533 240658 : }
534 :
535 : /*
536 : * mdzeroextend() -- Add new zeroed out blocks to the specified relation.
537 : *
538 : * Similar to mdextend(), except the relation can be extended by multiple
539 : * blocks at once and the added blocks will be filled with zeroes.
540 : */
541 : void
542 423572 : mdzeroextend(SMgrRelation reln, ForkNumber forknum,
543 : BlockNumber blocknum, int nblocks, bool skipFsync)
544 : {
545 : MdfdVec *v;
546 423572 : BlockNumber curblocknum = blocknum;
547 423572 : int remblocks = nblocks;
548 :
549 : Assert(nblocks > 0);
550 :
551 : /* This assert is too expensive to have on normally ... */
552 : #ifdef CHECK_WRITE_VS_EXTEND
553 : Assert(blocknum >= mdnblocks(reln, forknum));
554 : #endif
555 :
556 : /*
557 : * If a relation manages to grow to 2^32-1 blocks, refuse to extend it any
558 : * more --- we mustn't create a block whose number actually is
559 : * InvalidBlockNumber or larger.
560 : */
561 423572 : if ((uint64) blocknum + nblocks >= (uint64) InvalidBlockNumber)
562 0 : ereport(ERROR,
563 : (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
564 : errmsg("cannot extend file \"%s\" beyond %u blocks",
565 : relpath(reln->smgr_rlocator, forknum).str,
566 : InvalidBlockNumber)));
567 :
568 847144 : while (remblocks > 0)
569 : {
570 423572 : BlockNumber segstartblock = curblocknum % ((BlockNumber) RELSEG_SIZE);
571 423572 : off_t seekpos = (off_t) BLCKSZ * segstartblock;
572 : int numblocks;
573 :
574 423572 : if (segstartblock + remblocks > RELSEG_SIZE)
575 0 : numblocks = RELSEG_SIZE - segstartblock;
576 : else
577 423572 : numblocks = remblocks;
578 :
579 423572 : v = _mdfd_getseg(reln, forknum, curblocknum, skipFsync, EXTENSION_CREATE);
580 :
581 : Assert(segstartblock < RELSEG_SIZE);
582 : Assert(segstartblock + numblocks <= RELSEG_SIZE);
583 :
584 : /*
585 : * If available and useful, use posix_fallocate() (via
586 : * FileFallocate()) to extend the relation. That's often more
587 : * efficient than using write(), as it commonly won't cause the kernel
588 : * to allocate page cache space for the extended pages.
589 : *
590 : * However, we don't use FileFallocate() for small extensions, as it
591 : * defeats delayed allocation on some filesystems. Not clear where
592 : * that decision should be made though? For now just use a cutoff of
593 : * 8, anything between 4 and 8 worked OK in some local testing.
594 : */
595 423572 : if (numblocks > 8)
596 : {
597 : int ret;
598 :
599 1330 : ret = FileFallocate(v->mdfd_vfd,
600 : seekpos, (off_t) BLCKSZ * numblocks,
601 : WAIT_EVENT_DATA_FILE_EXTEND);
602 1330 : if (ret != 0)
603 : {
604 0 : ereport(ERROR,
605 : errcode_for_file_access(),
606 : errmsg("could not extend file \"%s\" with FileFallocate(): %m",
607 : FilePathName(v->mdfd_vfd)),
608 : errhint("Check free disk space."));
609 : }
610 : }
611 : else
612 : {
613 : int ret;
614 :
615 : /*
616 : * Even if we don't want to use fallocate, we can still extend a
617 : * bit more efficiently than writing each 8kB block individually.
618 : * pg_pwrite_zeros() (via FileZero()) uses pg_pwritev_with_retry()
619 : * to avoid multiple writes or needing a zeroed buffer for the
620 : * whole length of the extension.
621 : */
622 422242 : ret = FileZero(v->mdfd_vfd,
623 : seekpos, (off_t) BLCKSZ * numblocks,
624 : WAIT_EVENT_DATA_FILE_EXTEND);
625 422242 : if (ret < 0)
626 0 : ereport(ERROR,
627 : errcode_for_file_access(),
628 : errmsg("could not extend file \"%s\": %m",
629 : FilePathName(v->mdfd_vfd)),
630 : errhint("Check free disk space."));
631 : }
632 :
633 423572 : if (!skipFsync && !SmgrIsTemp(reln))
634 400842 : register_dirty_segment(reln, forknum, v);
635 :
636 : Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE));
637 :
638 423572 : remblocks -= numblocks;
639 423572 : curblocknum += numblocks;
640 : }
641 423572 : }
642 :
643 : /*
644 : * mdopenfork() -- Open one fork of the specified relation.
645 : *
646 : * Note we only open the first segment, when there are multiple segments.
647 : *
648 : * If first segment is not present, either ereport or return NULL according
649 : * to "behavior". We treat EXTENSION_CREATE the same as EXTENSION_FAIL;
650 : * EXTENSION_CREATE means it's OK to extend an existing relation, not to
651 : * invent one out of whole cloth.
652 : */
653 : static MdfdVec *
654 7396324 : mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior)
655 : {
656 : MdfdVec *mdfd;
657 : RelPathStr path;
658 : File fd;
659 :
660 : /* No work if already open */
661 7396324 : if (reln->md_num_open_segs[forknum] > 0)
662 4963248 : return &reln->md_seg_fds[forknum][0];
663 :
664 2433076 : path = relpath(reln->smgr_rlocator, forknum);
665 :
666 2433076 : fd = PathNameOpenFile(path.str, _mdfd_open_flags());
667 :
668 2433076 : if (fd < 0)
669 : {
670 787468 : if ((behavior & EXTENSION_RETURN_NULL) &&
671 787424 : FILE_POSSIBLY_DELETED(errno))
672 787424 : return NULL;
673 44 : ereport(ERROR,
674 : (errcode_for_file_access(),
675 : errmsg("could not open file \"%s\": %m", path.str)));
676 : }
677 :
678 1645608 : _fdvec_resize(reln, forknum, 1);
679 1645608 : mdfd = &reln->md_seg_fds[forknum][0];
680 1645608 : mdfd->mdfd_vfd = fd;
681 1645608 : mdfd->mdfd_segno = 0;
682 :
683 : Assert(_mdnblocks(reln, forknum, mdfd) <= ((BlockNumber) RELSEG_SIZE));
684 :
685 1645608 : return mdfd;
686 : }
687 :
688 : /*
689 : * mdopen() -- Initialize newly-opened relation.
690 : */
691 : void
692 2292360 : mdopen(SMgrRelation reln)
693 : {
694 : /* mark it not open */
695 11461800 : for (int forknum = 0; forknum <= MAX_FORKNUM; forknum++)
696 9169440 : reln->md_num_open_segs[forknum] = 0;
697 2292360 : }
698 :
699 : /*
700 : * mdclose() -- Close the specified relation, if it isn't closed already.
701 : */
702 : void
703 7296056 : mdclose(SMgrRelation reln, ForkNumber forknum)
704 : {
705 7296056 : int nopensegs = reln->md_num_open_segs[forknum];
706 :
707 : /* No work if already closed */
708 7296056 : if (nopensegs == 0)
709 6204552 : return;
710 :
711 : /* close segments starting from the end */
712 2183008 : while (nopensegs > 0)
713 : {
714 1091504 : MdfdVec *v = &reln->md_seg_fds[forknum][nopensegs - 1];
715 :
716 1091504 : FileClose(v->mdfd_vfd);
717 1091504 : _fdvec_resize(reln, forknum, nopensegs - 1);
718 1091504 : nopensegs--;
719 : }
720 : }
721 :
722 : /*
723 : * mdprefetch() -- Initiate asynchronous read of the specified blocks of a relation
724 : */
725 : bool
726 16952 : mdprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
727 : int nblocks)
728 : {
729 : #ifdef USE_PREFETCH
730 :
731 : Assert((io_direct_flags & IO_DIRECT_DATA) == 0);
732 :
733 16952 : if ((uint64) blocknum + nblocks > (uint64) MaxBlockNumber + 1)
734 0 : return false;
735 :
736 33904 : while (nblocks > 0)
737 : {
738 : off_t seekpos;
739 : MdfdVec *v;
740 : int nblocks_this_segment;
741 :
742 16952 : v = _mdfd_getseg(reln, forknum, blocknum, false,
743 16952 : InRecovery ? EXTENSION_RETURN_NULL : EXTENSION_FAIL);
744 16952 : if (v == NULL)
745 0 : return false;
746 :
747 16952 : seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
748 :
749 : Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
750 :
751 16952 : nblocks_this_segment =
752 16952 : Min(nblocks,
753 : RELSEG_SIZE - (blocknum % ((BlockNumber) RELSEG_SIZE)));
754 :
755 16952 : (void) FilePrefetch(v->mdfd_vfd, seekpos, BLCKSZ * nblocks_this_segment,
756 : WAIT_EVENT_DATA_FILE_PREFETCH);
757 :
758 16952 : blocknum += nblocks_this_segment;
759 16952 : nblocks -= nblocks_this_segment;
760 : }
761 : #endif /* USE_PREFETCH */
762 :
763 16952 : return true;
764 : }
765 :
766 : /*
767 : * Convert an array of buffer address into an array of iovec objects, and
768 : * return the number that were required. 'iov' must have enough space for up
769 : * to 'nblocks' elements, but the number used may be less depending on
770 : * merging. In the case of a run of fully contiguous buffers, a single iovec
771 : * will be populated that can be handled as a plain non-vectored I/O.
772 : */
773 : static int
774 3540118 : buffers_to_iovec(struct iovec *iov, void **buffers, int nblocks)
775 : {
776 : struct iovec *iovp;
777 : int iovcnt;
778 :
779 : Assert(nblocks >= 1);
780 :
781 : /* If this build supports direct I/O, buffers must be I/O aligned. */
782 7406322 : for (int i = 0; i < nblocks; ++i)
783 : {
784 : if (PG_O_DIRECT != 0 && PG_IO_ALIGN_SIZE <= BLCKSZ)
785 : Assert((uintptr_t) buffers[i] ==
786 : TYPEALIGN(PG_IO_ALIGN_SIZE, buffers[i]));
787 : }
788 :
789 : /* Start the first iovec off with the first buffer. */
790 3540118 : iovp = &iov[0];
791 3540118 : iovp->iov_base = buffers[0];
792 3540118 : iovp->iov_len = BLCKSZ;
793 3540118 : iovcnt = 1;
794 :
795 : /* Try to merge the rest. */
796 3866204 : for (int i = 1; i < nblocks; ++i)
797 : {
798 326086 : void *buffer = buffers[i];
799 :
800 326086 : if (((char *) iovp->iov_base + iovp->iov_len) == buffer)
801 : {
802 : /* Contiguous with the last iovec. */
803 317082 : iovp->iov_len += BLCKSZ;
804 : }
805 : else
806 : {
807 : /* Need a new iovec. */
808 9004 : iovp++;
809 9004 : iovp->iov_base = buffer;
810 9004 : iovp->iov_len = BLCKSZ;
811 9004 : iovcnt++;
812 : }
813 : }
814 :
815 3540118 : return iovcnt;
816 : }
817 :
818 : /*
819 : * mdmaxcombine() -- Return the maximum number of total blocks that can be
820 : * combined with an IO starting at blocknum.
821 : */
822 : uint32
823 66268 : mdmaxcombine(SMgrRelation reln, ForkNumber forknum,
824 : BlockNumber blocknum)
825 : {
826 : BlockNumber segoff;
827 :
828 66268 : segoff = blocknum % ((BlockNumber) RELSEG_SIZE);
829 :
830 66268 : return RELSEG_SIZE - segoff;
831 : }
832 :
833 : /*
834 : * mdreadv() -- Read the specified blocks from a relation.
835 : */
836 : void
837 1196 : mdreadv(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
838 : void **buffers, BlockNumber nblocks)
839 : {
840 2392 : while (nblocks > 0)
841 : {
842 : struct iovec iov[PG_IOV_MAX];
843 : int iovcnt;
844 : off_t seekpos;
845 : int nbytes;
846 : MdfdVec *v;
847 : BlockNumber nblocks_this_segment;
848 : size_t transferred_this_segment;
849 : size_t size_this_segment;
850 :
851 1196 : v = _mdfd_getseg(reln, forknum, blocknum, false,
852 : EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY);
853 :
854 1196 : seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
855 :
856 : Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
857 :
858 1196 : nblocks_this_segment =
859 1196 : Min(nblocks,
860 : RELSEG_SIZE - (blocknum % ((BlockNumber) RELSEG_SIZE)));
861 1196 : nblocks_this_segment = Min(nblocks_this_segment, lengthof(iov));
862 :
863 1196 : if (nblocks_this_segment != nblocks)
864 0 : elog(ERROR, "read crosses segment boundary");
865 :
866 1196 : iovcnt = buffers_to_iovec(iov, buffers, nblocks_this_segment);
867 1196 : size_this_segment = nblocks_this_segment * BLCKSZ;
868 1196 : transferred_this_segment = 0;
869 :
870 : /*
871 : * Inner loop to continue after a short read. We'll keep going until
872 : * we hit EOF rather than assuming that a short read means we hit the
873 : * end.
874 : */
875 : for (;;)
876 : {
877 : TRACE_POSTGRESQL_SMGR_MD_READ_START(forknum, blocknum,
878 : reln->smgr_rlocator.locator.spcOid,
879 : reln->smgr_rlocator.locator.dbOid,
880 : reln->smgr_rlocator.locator.relNumber,
881 : reln->smgr_rlocator.backend);
882 1196 : nbytes = FileReadV(v->mdfd_vfd, iov, iovcnt, seekpos,
883 : WAIT_EVENT_DATA_FILE_READ);
884 : TRACE_POSTGRESQL_SMGR_MD_READ_DONE(forknum, blocknum,
885 : reln->smgr_rlocator.locator.spcOid,
886 : reln->smgr_rlocator.locator.dbOid,
887 : reln->smgr_rlocator.locator.relNumber,
888 : reln->smgr_rlocator.backend,
889 : nbytes,
890 : size_this_segment - transferred_this_segment);
891 :
892 : #ifdef SIMULATE_SHORT_READ
893 : nbytes = Min(nbytes, 4096);
894 : #endif
895 :
896 1196 : if (nbytes < 0)
897 0 : ereport(ERROR,
898 : (errcode_for_file_access(),
899 : errmsg("could not read blocks %u..%u in file \"%s\": %m",
900 : blocknum,
901 : blocknum + nblocks_this_segment - 1,
902 : FilePathName(v->mdfd_vfd))));
903 :
904 1196 : if (nbytes == 0)
905 : {
906 : /*
907 : * We are at or past EOF, or we read a partial block at EOF.
908 : * Normally this is an error; upper levels should never try to
909 : * read a nonexistent block. However, if zero_damaged_pages
910 : * is ON or we are InRecovery, we should instead return zeroes
911 : * without complaining. This allows, for example, the case of
912 : * trying to update a block that was later truncated away.
913 : *
914 : * NB: We think that this codepath is unreachable in recovery
915 : * and incomplete with zero_damaged_pages, as missing segments
916 : * are not created. Putting blocks into the buffer-pool that
917 : * do not exist on disk is rather problematic, as it will not
918 : * be found by scans that rely on smgrnblocks(), as they are
919 : * beyond EOF. It also can cause weird problems with relation
920 : * extension, as relation extension does not expect blocks
921 : * beyond EOF to exist.
922 : *
923 : * Therefore we do not want to copy the logic into
924 : * mdstartreadv(), where it would have to be more complicated
925 : * due to potential differences in the zero_damaged_pages
926 : * setting between the definer and completor of IO.
927 : *
928 : * For PG 18, we are putting an Assert(false) in mdreadv()
929 : * (triggering failures in assertion-enabled builds, but
930 : * continuing to work in production builds). Afterwards we
931 : * plan to remove this code entirely.
932 : */
933 0 : if (zero_damaged_pages || InRecovery)
934 : {
935 : Assert(false); /* see comment above */
936 :
937 0 : for (BlockNumber i = transferred_this_segment / BLCKSZ;
938 : i < nblocks_this_segment;
939 0 : ++i)
940 0 : memset(buffers[i], 0, BLCKSZ);
941 0 : break;
942 : }
943 : else
944 0 : ereport(ERROR,
945 : (errcode(ERRCODE_DATA_CORRUPTED),
946 : errmsg("could not read blocks %u..%u in file \"%s\": read only %zu of %zu bytes",
947 : blocknum,
948 : blocknum + nblocks_this_segment - 1,
949 : FilePathName(v->mdfd_vfd),
950 : transferred_this_segment,
951 : size_this_segment)));
952 : }
953 :
954 : /* One loop should usually be enough. */
955 1196 : transferred_this_segment += nbytes;
956 : Assert(transferred_this_segment <= size_this_segment);
957 1196 : if (transferred_this_segment == size_this_segment)
958 1196 : break;
959 :
960 : /* Adjust position and vectors after a short read. */
961 0 : seekpos += nbytes;
962 0 : iovcnt = compute_remaining_iovec(iov, iov, iovcnt, nbytes);
963 : }
964 :
965 1196 : nblocks -= nblocks_this_segment;
966 1196 : buffers += nblocks_this_segment;
967 1196 : blocknum += nblocks_this_segment;
968 : }
969 1196 : }
970 :
971 : /*
972 : * mdstartreadv() -- Asynchronous version of mdreadv().
973 : */
974 : void
975 2466050 : mdstartreadv(PgAioHandle *ioh,
976 : SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
977 : void **buffers, BlockNumber nblocks)
978 : {
979 : off_t seekpos;
980 : MdfdVec *v;
981 : BlockNumber nblocks_this_segment;
982 : struct iovec *iov;
983 : int iovcnt;
984 : int ret;
985 :
986 2466050 : v = _mdfd_getseg(reln, forknum, blocknum, false,
987 : EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY);
988 :
989 2466020 : seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
990 :
991 : Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
992 :
993 2466020 : nblocks_this_segment =
994 2466020 : Min(nblocks,
995 : RELSEG_SIZE - (blocknum % ((BlockNumber) RELSEG_SIZE)));
996 :
997 2466020 : if (nblocks_this_segment != nblocks)
998 0 : elog(ERROR, "read crossing segment boundary");
999 :
1000 2466020 : iovcnt = pgaio_io_get_iovec(ioh, &iov);
1001 :
1002 : Assert(nblocks <= iovcnt);
1003 :
1004 2466020 : iovcnt = buffers_to_iovec(iov, buffers, nblocks_this_segment);
1005 :
1006 : Assert(iovcnt <= nblocks_this_segment);
1007 :
1008 2466020 : if (!(io_direct_flags & IO_DIRECT_DATA))
1009 2463248 : pgaio_io_set_flag(ioh, PGAIO_HF_BUFFERED);
1010 :
1011 2466020 : pgaio_io_set_target_smgr(ioh,
1012 : reln,
1013 : forknum,
1014 : blocknum,
1015 : nblocks,
1016 : false);
1017 2466020 : pgaio_io_register_callbacks(ioh, PGAIO_HCB_MD_READV, 0);
1018 :
1019 2466020 : ret = FileStartReadV(ioh, v->mdfd_vfd, iovcnt, seekpos, WAIT_EVENT_DATA_FILE_READ);
1020 2466020 : if (ret != 0)
1021 0 : ereport(ERROR,
1022 : (errcode_for_file_access(),
1023 : errmsg("could not start reading blocks %u..%u in file \"%s\": %m",
1024 : blocknum,
1025 : blocknum + nblocks_this_segment - 1,
1026 : FilePathName(v->mdfd_vfd))));
1027 :
1028 : /*
1029 : * The error checks corresponding to the post-read checks in mdreadv() are
1030 : * in md_readv_complete().
1031 : *
1032 : * However we chose, at least for now, to not implement the
1033 : * zero_damaged_pages logic present in mdreadv(). As outlined in mdreadv()
1034 : * that logic is rather problematic, and we want to get rid of it. Here
1035 : * equivalent logic would have to be more complicated due to potential
1036 : * differences in the zero_damaged_pages setting between the definer and
1037 : * completor of IO.
1038 : */
1039 2466020 : }
1040 :
1041 : /*
1042 : * mdwritev() -- Write the supplied blocks at the appropriate location.
1043 : *
1044 : * This is to be used only for updating already-existing blocks of a
1045 : * relation (ie, those before the current EOF). To extend a relation,
1046 : * use mdextend().
1047 : */
1048 : void
1049 1072902 : mdwritev(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
1050 : const void **buffers, BlockNumber nblocks, bool skipFsync)
1051 : {
1052 : /* This assert is too expensive to have on normally ... */
1053 : #ifdef CHECK_WRITE_VS_EXTEND
1054 : Assert((uint64) blocknum + (uint64) nblocks <= (uint64) mdnblocks(reln, forknum));
1055 : #endif
1056 :
1057 2145804 : while (nblocks > 0)
1058 : {
1059 : struct iovec iov[PG_IOV_MAX];
1060 : int iovcnt;
1061 : off_t seekpos;
1062 : int nbytes;
1063 : MdfdVec *v;
1064 : BlockNumber nblocks_this_segment;
1065 : size_t transferred_this_segment;
1066 : size_t size_this_segment;
1067 :
1068 1072902 : v = _mdfd_getseg(reln, forknum, blocknum, skipFsync,
1069 : EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY);
1070 :
1071 1072902 : seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
1072 :
1073 : Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
1074 :
1075 1072902 : nblocks_this_segment =
1076 1072902 : Min(nblocks,
1077 : RELSEG_SIZE - (blocknum % ((BlockNumber) RELSEG_SIZE)));
1078 1072902 : nblocks_this_segment = Min(nblocks_this_segment, lengthof(iov));
1079 :
1080 1072902 : if (nblocks_this_segment != nblocks)
1081 0 : elog(ERROR, "write crosses segment boundary");
1082 :
1083 1072902 : iovcnt = buffers_to_iovec(iov, (void **) buffers, nblocks_this_segment);
1084 1072902 : size_this_segment = nblocks_this_segment * BLCKSZ;
1085 1072902 : transferred_this_segment = 0;
1086 :
1087 : /*
1088 : * Inner loop to continue after a short write. If the reason is that
1089 : * we're out of disk space, a future attempt should get an ENOSPC
1090 : * error from the kernel.
1091 : */
1092 : for (;;)
1093 : {
1094 : TRACE_POSTGRESQL_SMGR_MD_WRITE_START(forknum, blocknum,
1095 : reln->smgr_rlocator.locator.spcOid,
1096 : reln->smgr_rlocator.locator.dbOid,
1097 : reln->smgr_rlocator.locator.relNumber,
1098 : reln->smgr_rlocator.backend);
1099 1072902 : nbytes = FileWriteV(v->mdfd_vfd, iov, iovcnt, seekpos,
1100 : WAIT_EVENT_DATA_FILE_WRITE);
1101 : TRACE_POSTGRESQL_SMGR_MD_WRITE_DONE(forknum, blocknum,
1102 : reln->smgr_rlocator.locator.spcOid,
1103 : reln->smgr_rlocator.locator.dbOid,
1104 : reln->smgr_rlocator.locator.relNumber,
1105 : reln->smgr_rlocator.backend,
1106 : nbytes,
1107 : size_this_segment - transferred_this_segment);
1108 :
1109 : #ifdef SIMULATE_SHORT_WRITE
1110 : nbytes = Min(nbytes, 4096);
1111 : #endif
1112 :
1113 1072902 : if (nbytes < 0)
1114 : {
1115 0 : bool enospc = errno == ENOSPC;
1116 :
1117 0 : ereport(ERROR,
1118 : (errcode_for_file_access(),
1119 : errmsg("could not write blocks %u..%u in file \"%s\": %m",
1120 : blocknum,
1121 : blocknum + nblocks_this_segment - 1,
1122 : FilePathName(v->mdfd_vfd)),
1123 : enospc ? errhint("Check free disk space.") : 0));
1124 : }
1125 :
1126 : /* One loop should usually be enough. */
1127 1072902 : transferred_this_segment += nbytes;
1128 : Assert(transferred_this_segment <= size_this_segment);
1129 1072902 : if (transferred_this_segment == size_this_segment)
1130 1072902 : break;
1131 :
1132 : /* Adjust position and iovecs after a short write. */
1133 0 : seekpos += nbytes;
1134 0 : iovcnt = compute_remaining_iovec(iov, iov, iovcnt, nbytes);
1135 : }
1136 :
1137 1072902 : if (!skipFsync && !SmgrIsTemp(reln))
1138 1064930 : register_dirty_segment(reln, forknum, v);
1139 :
1140 1072902 : nblocks -= nblocks_this_segment;
1141 1072902 : buffers += nblocks_this_segment;
1142 1072902 : blocknum += nblocks_this_segment;
1143 : }
1144 1072902 : }
1145 :
1146 :
1147 : /*
1148 : * mdwriteback() -- Tell the kernel to write pages back to storage.
1149 : *
1150 : * This accepts a range of blocks because flushing several pages at once is
1151 : * considerably more efficient than doing so individually.
1152 : */
1153 : void
1154 0 : mdwriteback(SMgrRelation reln, ForkNumber forknum,
1155 : BlockNumber blocknum, BlockNumber nblocks)
1156 : {
1157 : Assert((io_direct_flags & IO_DIRECT_DATA) == 0);
1158 :
1159 : /*
1160 : * Issue flush requests in as few requests as possible; have to split at
1161 : * segment boundaries though, since those are actually separate files.
1162 : */
1163 0 : while (nblocks > 0)
1164 : {
1165 0 : BlockNumber nflush = nblocks;
1166 : off_t seekpos;
1167 : MdfdVec *v;
1168 : int segnum_start,
1169 : segnum_end;
1170 :
1171 0 : v = _mdfd_getseg(reln, forknum, blocknum, true /* not used */ ,
1172 : EXTENSION_DONT_OPEN);
1173 :
1174 : /*
1175 : * We might be flushing buffers of already removed relations, that's
1176 : * ok, just ignore that case. If the segment file wasn't open already
1177 : * (ie from a recent mdwrite()), then we don't want to re-open it, to
1178 : * avoid a race with PROCSIGNAL_BARRIER_SMGRRELEASE that might leave
1179 : * us with a descriptor to a file that is about to be unlinked.
1180 : */
1181 0 : if (!v)
1182 0 : return;
1183 :
1184 : /* compute offset inside the current segment */
1185 0 : segnum_start = blocknum / RELSEG_SIZE;
1186 :
1187 : /* compute number of desired writes within the current segment */
1188 0 : segnum_end = (blocknum + nblocks - 1) / RELSEG_SIZE;
1189 0 : if (segnum_start != segnum_end)
1190 0 : nflush = RELSEG_SIZE - (blocknum % ((BlockNumber) RELSEG_SIZE));
1191 :
1192 : Assert(nflush >= 1);
1193 : Assert(nflush <= nblocks);
1194 :
1195 0 : seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
1196 :
1197 0 : FileWriteback(v->mdfd_vfd, seekpos, (off_t) BLCKSZ * nflush, WAIT_EVENT_DATA_FILE_FLUSH);
1198 :
1199 0 : nblocks -= nflush;
1200 0 : blocknum += nflush;
1201 : }
1202 : }
1203 :
1204 : /*
1205 : * mdnblocks() -- Get the number of blocks stored in a relation.
1206 : *
1207 : * Important side effect: all active segments of the relation are opened
1208 : * and added to the md_seg_fds array. If this routine has not been
1209 : * called, then only segments up to the last one actually touched
1210 : * are present in the array.
1211 : */
1212 : BlockNumber
1213 4842538 : mdnblocks(SMgrRelation reln, ForkNumber forknum)
1214 : {
1215 : MdfdVec *v;
1216 : BlockNumber nblocks;
1217 : BlockNumber segno;
1218 :
1219 4842538 : mdopenfork(reln, forknum, EXTENSION_FAIL);
1220 :
1221 : /* mdopen has opened the first segment */
1222 : Assert(reln->md_num_open_segs[forknum] > 0);
1223 :
1224 : /*
1225 : * Start from the last open segments, to avoid redundant seeks. We have
1226 : * previously verified that these segments are exactly RELSEG_SIZE long,
1227 : * and it's useless to recheck that each time.
1228 : *
1229 : * NOTE: this assumption could only be wrong if another backend has
1230 : * truncated the relation. We rely on higher code levels to handle that
1231 : * scenario by closing and re-opening the md fd, which is handled via
1232 : * relcache flush. (Since the checkpointer doesn't participate in
1233 : * relcache flush, it could have segment entries for inactive segments;
1234 : * that's OK because the checkpointer never needs to compute relation
1235 : * size.)
1236 : */
1237 4842500 : segno = reln->md_num_open_segs[forknum] - 1;
1238 4842500 : v = &reln->md_seg_fds[forknum][segno];
1239 :
1240 : for (;;)
1241 : {
1242 4842500 : nblocks = _mdnblocks(reln, forknum, v);
1243 4842500 : if (nblocks > ((BlockNumber) RELSEG_SIZE))
1244 0 : elog(FATAL, "segment too big");
1245 4842500 : if (nblocks < ((BlockNumber) RELSEG_SIZE))
1246 4842500 : return (segno * ((BlockNumber) RELSEG_SIZE)) + nblocks;
1247 :
1248 : /*
1249 : * If segment is exactly RELSEG_SIZE, advance to next one.
1250 : */
1251 0 : segno++;
1252 :
1253 : /*
1254 : * We used to pass O_CREAT here, but that has the disadvantage that it
1255 : * might create a segment which has vanished through some operating
1256 : * system misadventure. In such a case, creating the segment here
1257 : * undermines _mdfd_getseg's attempts to notice and report an error
1258 : * upon access to a missing segment.
1259 : */
1260 0 : v = _mdfd_openseg(reln, forknum, segno, 0);
1261 0 : if (v == NULL)
1262 0 : return segno * ((BlockNumber) RELSEG_SIZE);
1263 : }
1264 : }
1265 :
1266 : /*
1267 : * mdtruncate() -- Truncate relation to specified number of blocks.
1268 : *
1269 : * Guaranteed not to allocate memory, so it can be used in a critical section.
1270 : * Caller must have called smgrnblocks() to obtain curnblk while holding a
1271 : * sufficient lock to prevent a change in relation size, and not used any smgr
1272 : * functions for this relation or handled interrupts in between. This makes
1273 : * sure we have opened all active segments, so that truncate loop will get
1274 : * them all!
1275 : */
1276 : void
1277 1832 : mdtruncate(SMgrRelation reln, ForkNumber forknum,
1278 : BlockNumber curnblk, BlockNumber nblocks)
1279 : {
1280 : BlockNumber priorblocks;
1281 : int curopensegs;
1282 :
1283 1832 : if (nblocks > curnblk)
1284 : {
1285 : /* Bogus request ... but no complaint if InRecovery */
1286 0 : if (InRecovery)
1287 0 : return;
1288 0 : ereport(ERROR,
1289 : (errmsg("could not truncate file \"%s\" to %u blocks: it's only %u blocks now",
1290 : relpath(reln->smgr_rlocator, forknum).str,
1291 : nblocks, curnblk)));
1292 : }
1293 1832 : if (nblocks == curnblk)
1294 714 : return; /* no work */
1295 :
1296 : /*
1297 : * Truncate segments, starting at the last one. Starting at the end makes
1298 : * managing the memory for the fd array easier, should there be errors.
1299 : */
1300 1118 : curopensegs = reln->md_num_open_segs[forknum];
1301 2236 : while (curopensegs > 0)
1302 : {
1303 : MdfdVec *v;
1304 :
1305 1118 : priorblocks = (curopensegs - 1) * RELSEG_SIZE;
1306 :
1307 1118 : v = &reln->md_seg_fds[forknum][curopensegs - 1];
1308 :
1309 1118 : if (priorblocks > nblocks)
1310 : {
1311 : /*
1312 : * This segment is no longer active. We truncate the file, but do
1313 : * not delete it, for reasons explained in the header comments.
1314 : */
1315 0 : if (FileTruncate(v->mdfd_vfd, 0, WAIT_EVENT_DATA_FILE_TRUNCATE) < 0)
1316 0 : ereport(ERROR,
1317 : (errcode_for_file_access(),
1318 : errmsg("could not truncate file \"%s\": %m",
1319 : FilePathName(v->mdfd_vfd))));
1320 :
1321 0 : if (!SmgrIsTemp(reln))
1322 0 : register_dirty_segment(reln, forknum, v);
1323 :
1324 : /* we never drop the 1st segment */
1325 : Assert(v != &reln->md_seg_fds[forknum][0]);
1326 :
1327 0 : FileClose(v->mdfd_vfd);
1328 0 : _fdvec_resize(reln, forknum, curopensegs - 1);
1329 : }
1330 1118 : else if (priorblocks + ((BlockNumber) RELSEG_SIZE) > nblocks)
1331 : {
1332 : /*
1333 : * This is the last segment we want to keep. Truncate the file to
1334 : * the right length. NOTE: if nblocks is exactly a multiple K of
1335 : * RELSEG_SIZE, we will truncate the K+1st segment to 0 length but
1336 : * keep it. This adheres to the invariant given in the header
1337 : * comments.
1338 : */
1339 1118 : BlockNumber lastsegblocks = nblocks - priorblocks;
1340 :
1341 1118 : if (FileTruncate(v->mdfd_vfd, (off_t) lastsegblocks * BLCKSZ, WAIT_EVENT_DATA_FILE_TRUNCATE) < 0)
1342 0 : ereport(ERROR,
1343 : (errcode_for_file_access(),
1344 : errmsg("could not truncate file \"%s\" to %u blocks: %m",
1345 : FilePathName(v->mdfd_vfd),
1346 : nblocks)));
1347 1118 : if (!SmgrIsTemp(reln))
1348 790 : register_dirty_segment(reln, forknum, v);
1349 : }
1350 : else
1351 : {
1352 : /*
1353 : * We still need this segment, so nothing to do for this and any
1354 : * earlier segment.
1355 : */
1356 0 : break;
1357 : }
1358 1118 : curopensegs--;
1359 : }
1360 : }
1361 :
1362 : /*
1363 : * mdregistersync() -- Mark whole relation as needing fsync
1364 : */
1365 : void
1366 50904 : mdregistersync(SMgrRelation reln, ForkNumber forknum)
1367 : {
1368 : int segno;
1369 : int min_inactive_seg;
1370 :
1371 : /*
1372 : * NOTE: mdnblocks makes sure we have opened all active segments, so that
1373 : * the loop below will get them all!
1374 : */
1375 50904 : mdnblocks(reln, forknum);
1376 :
1377 50904 : min_inactive_seg = segno = reln->md_num_open_segs[forknum];
1378 :
1379 : /*
1380 : * Temporarily open inactive segments, then close them after sync. There
1381 : * may be some inactive segments left opened after error, but that is
1382 : * harmless. We don't bother to clean them up and take a risk of further
1383 : * trouble. The next mdclose() will soon close them.
1384 : */
1385 50904 : while (_mdfd_openseg(reln, forknum, segno, 0) != NULL)
1386 0 : segno++;
1387 :
1388 101808 : while (segno > 0)
1389 : {
1390 50904 : MdfdVec *v = &reln->md_seg_fds[forknum][segno - 1];
1391 :
1392 50904 : register_dirty_segment(reln, forknum, v);
1393 :
1394 : /* Close inactive segments immediately */
1395 50904 : if (segno > min_inactive_seg)
1396 : {
1397 0 : FileClose(v->mdfd_vfd);
1398 0 : _fdvec_resize(reln, forknum, segno - 1);
1399 : }
1400 :
1401 50904 : segno--;
1402 : }
1403 50904 : }
1404 :
1405 : /*
1406 : * mdimmedsync() -- Immediately sync a relation to stable storage.
1407 : *
1408 : * Note that only writes already issued are synced; this routine knows
1409 : * nothing of dirty buffers that may exist inside the buffer manager. We
1410 : * sync active and inactive segments; smgrDoPendingSyncs() relies on this.
1411 : * Consider a relation skipping WAL. Suppose a checkpoint syncs blocks of
1412 : * some segment, then mdtruncate() renders that segment inactive. If we
1413 : * crash before the next checkpoint syncs the newly-inactive segment, that
1414 : * segment may survive recovery, reintroducing unwanted data into the table.
1415 : */
1416 : void
1417 54 : mdimmedsync(SMgrRelation reln, ForkNumber forknum)
1418 : {
1419 : int segno;
1420 : int min_inactive_seg;
1421 :
1422 : /*
1423 : * NOTE: mdnblocks makes sure we have opened all active segments, so that
1424 : * the loop below will get them all!
1425 : */
1426 54 : mdnblocks(reln, forknum);
1427 :
1428 54 : min_inactive_seg = segno = reln->md_num_open_segs[forknum];
1429 :
1430 : /*
1431 : * Temporarily open inactive segments, then close them after sync. There
1432 : * may be some inactive segments left opened after fsync() error, but that
1433 : * is harmless. We don't bother to clean them up and take a risk of
1434 : * further trouble. The next mdclose() will soon close them.
1435 : */
1436 54 : while (_mdfd_openseg(reln, forknum, segno, 0) != NULL)
1437 0 : segno++;
1438 :
1439 108 : while (segno > 0)
1440 : {
1441 54 : MdfdVec *v = &reln->md_seg_fds[forknum][segno - 1];
1442 :
1443 : /*
1444 : * fsyncs done through mdimmedsync() should be tracked in a separate
1445 : * IOContext than those done through mdsyncfiletag() to differentiate
1446 : * between unavoidable client backend fsyncs (e.g. those done during
1447 : * index build) and those which ideally would have been done by the
1448 : * checkpointer. Since other IO operations bypassing the buffer
1449 : * manager could also be tracked in such an IOContext, wait until
1450 : * these are also tracked to track immediate fsyncs.
1451 : */
1452 54 : if (FileSync(v->mdfd_vfd, WAIT_EVENT_DATA_FILE_IMMEDIATE_SYNC) < 0)
1453 0 : ereport(data_sync_elevel(ERROR),
1454 : (errcode_for_file_access(),
1455 : errmsg("could not fsync file \"%s\": %m",
1456 : FilePathName(v->mdfd_vfd))));
1457 :
1458 : /* Close inactive segments immediately */
1459 54 : if (segno > min_inactive_seg)
1460 : {
1461 0 : FileClose(v->mdfd_vfd);
1462 0 : _fdvec_resize(reln, forknum, segno - 1);
1463 : }
1464 :
1465 54 : segno--;
1466 : }
1467 54 : }
1468 :
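/*
 * mdfd() -- Return the OS file descriptor of the segment containing the
 * given block.
 *
 * The segment is opened if necessary (erroring out if it is missing), and
 * *off is set to the block's byte offset within that segment file.
 */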
1469 : int
1470 941894 : mdfd(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, uint32 *off)
1471 : {
1472 941894 : MdfdVec *v = mdopenfork(reln, forknum, EXTENSION_FAIL);
1473 :
1474 941894 : v = _mdfd_getseg(reln, forknum, blocknum, false,
1475 : EXTENSION_FAIL);
1476 :
1477 941894 : *off = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
1478 :
1479 : Assert(*off < (off_t) BLCKSZ * RELSEG_SIZE);
1480 :
1481 941894 : return FileGetRawDesc(v->mdfd_vfd);
1482 : }
1483 :
1484 : /*
1485 : * register_dirty_segment() -- Mark a relation segment as needing fsync
1486 : *
1487 : * If there is a local pending-ops table, just make an entry in it for
1488 : * ProcessSyncRequests to process later. Otherwise, try to pass off the
1489 : * fsync request to the checkpointer process. If that fails, just do the
1490 : * fsync locally before returning (we hope this will not happen often
1491 : * enough to be a performance problem).
1492 : */
1493 : static void
1494 1831066 : register_dirty_segment(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
1495 : {
1496 : FileTag tag;
1497 :
1498 1831066 : INIT_MD_FILETAG(tag, reln->smgr_rlocator.locator, forknum, seg->mdfd_segno);
1499 :
1500 : /* Temp relations should never be fsync'd */
1501 : Assert(!SmgrIsTemp(reln));
1502 :
1503 1831066 : if (!RegisterSyncRequest(&tag, SYNC_REQUEST, false /* retryOnError */ ))
1504 : {
1505 : instr_time io_start;
1506 :
1507 622 : ereport(DEBUG1,
1508 : (errmsg_internal("could not forward fsync request because request queue is full")));
1509 :
1510 622 : io_start = pgstat_prepare_io_time(track_io_timing);
1511 :
1512 622 : if (FileSync(seg->mdfd_vfd, WAIT_EVENT_DATA_FILE_SYNC) < 0)
1513 0 : ereport(data_sync_elevel(ERROR),
1514 : (errcode_for_file_access(),
1515 : errmsg("could not fsync file \"%s\": %m",
1516 : FilePathName(seg->mdfd_vfd))));
1517 :
1518 : /*
1519 : * We have no way of knowing if the current IOContext is
1520 : * IOCONTEXT_NORMAL or IOCONTEXT_[BULKREAD, BULKWRITE, VACUUM] at this
1521 : * point, so count the fsync as being in the IOCONTEXT_NORMAL
1522 : * IOContext. This is probably okay, because the number of backend
1523 : * fsyncs doesn't say anything about the efficacy of the
1524 : * BufferAccessStrategy. And counting both fsyncs done in
1525 : * IOCONTEXT_NORMAL and IOCONTEXT_[BULKREAD, BULKWRITE, VACUUM] under
1526 : * IOCONTEXT_NORMAL is likely clearer when investigating the number of
1527 : * backend fsyncs.
1528 : */
1529 622 : pgstat_count_io_op_time(IOOBJECT_RELATION, IOCONTEXT_NORMAL,
1530 : IOOP_FSYNC, io_start, 1, 0);
1531 : }
1532 1831066 : }
1533 :
1534 : /*
1535 : * register_unlink_segment() -- Schedule a file to be deleted after next checkpoint
1536 : */
1537 : static void
1538 71390 : register_unlink_segment(RelFileLocatorBackend rlocator, ForkNumber forknum,
1539 : BlockNumber segno)
1540 : {
1541 : FileTag tag;
1542 :
1543 71390 : INIT_MD_FILETAG(tag, rlocator.locator, forknum, segno);
1544 :
1545 : /* Should never be used with temp relations */
1546 : Assert(!RelFileLocatorBackendIsTemp(rlocator));
1547 :
1548 71390 : RegisterSyncRequest(&tag, SYNC_UNLINK_REQUEST, true /* retryOnError */ );
1549 71390 : }
1550 :
1551 : /*
1552 : * register_forget_request() -- forget any fsyncs for a relation fork's segment
1553 : */
1554 : static void
1555 274338 : register_forget_request(RelFileLocatorBackend rlocator, ForkNumber forknum,
1556 : BlockNumber segno)
1557 : {
1558 : FileTag tag;
1559 :
1560 274338 : INIT_MD_FILETAG(tag, rlocator.locator, forknum, segno);
1561 :
1562 274338 : RegisterSyncRequest(&tag, SYNC_FORGET_REQUEST, true /* retryOnError */ );
1563 274338 : }
1564 :
1565 : /*
1566 : * ForgetDatabaseSyncRequests -- forget any fsyncs and unlinks for a DB
1567 : */
1568 : void
1569 120 : ForgetDatabaseSyncRequests(Oid dbid)
1570 : {
1571 : FileTag tag;
1572 : RelFileLocator rlocator;
1573 :
1574 120 : rlocator.dbOid = dbid;
1575 120 : rlocator.spcOid = 0;
1576 120 : rlocator.relNumber = 0;
1577 :
1578 120 : INIT_MD_FILETAG(tag, rlocator, InvalidForkNumber, InvalidBlockNumber);
1579 :
1580 120 : RegisterSyncRequest(&tag, SYNC_FILTER_REQUEST, true /* retryOnError */ );
1581 120 : }
1582 :
1583 : /*
1584 : * DropRelationFiles -- drop files of all given relations
1585 : */
1586 : void
1587 5180 : DropRelationFiles(RelFileLocator *delrels, int ndelrels, bool isRedo)
1588 : {
1589 : SMgrRelation *srels;
1590 : int i;
1591 :
1592 5180 : srels = palloc(sizeof(SMgrRelation) * ndelrels);
1593 20174 : for (i = 0; i < ndelrels; i++)
1594 : {
1595 14994 : SMgrRelation srel = smgropen(delrels[i], INVALID_PROC_NUMBER);
1596 :
1597 14994 : if (isRedo)
1598 : {
1599 : ForkNumber fork;
1600 :
1601 74730 : for (fork = 0; fork <= MAX_FORKNUM; fork++)
1602 59784 : XLogDropRelation(delrels[i], fork);
1603 : }
1604 14994 : srels[i] = srel;
1605 : }
1606 :
1607 5180 : smgrdounlinkall(srels, ndelrels, isRedo);
1608 :
1609 20174 : for (i = 0; i < ndelrels; i++)
1610 14994 : smgrclose(srels[i]);
1611 5180 : pfree(srels);
1612 5180 : }
1613 :
1614 :
1615 : /*
1616 : * _fdvec_resize() -- Resize the fork's open segments array
1617 : */
1618 : static void
1619 3057184 : _fdvec_resize(SMgrRelation reln,
1620 : ForkNumber forknum,
1621 : int nseg)
1622 : {
1623 3057184 : if (nseg == 0)
1624 : {
1625 1091504 : if (reln->md_num_open_segs[forknum] > 0)
1626 : {
1627 1091504 : pfree(reln->md_seg_fds[forknum]);
1628 1091504 : reln->md_seg_fds[forknum] = NULL;
1629 : }
1630 : }
1631 1965680 : else if (reln->md_num_open_segs[forknum] == 0)
1632 : {
1633 1965680 : reln->md_seg_fds[forknum] =
1634 1965680 : MemoryContextAlloc(MdCxt, sizeof(MdfdVec) * nseg);
1635 : }
1636 0 : else if (nseg > reln->md_num_open_segs[forknum])
1637 : {
1638 : /*
1639 : * It doesn't seem worthwhile complicating the code to amortize
1640 : * repalloc() calls. Those are far faster than PathNameOpenFile() or
1641 : * FileClose(), and the memory context internally will sometimes avoid
1642 : * doing an actual reallocation.
1643 : */
1644 0 : reln->md_seg_fds[forknum] =
1645 0 : repalloc(reln->md_seg_fds[forknum],
1646 : sizeof(MdfdVec) * nseg);
1647 : }
1648 : else
1649 : {
1650 : /*
1651 : * We don't reallocate a smaller array, because we want mdtruncate()
1652 : * to be able to promise that it won't allocate memory, so that it is
1653 : * allowed in a critical section. This means that a bit of space in
1654 : * the array is now wasted, until the next time we add a segment and
1655 : * reallocate.
1656 : */
1657 : }
1658 :
1659 3057184 : reln->md_num_open_segs[forknum] = nseg;
1660 3057184 : }
1661 :
1662 : /*
1663 : * Return the filename for the specified segment of the relation. The
1664 : * result is an MdPathStr returned by value (not a palloc'd string).
1665 : */
1666 : static MdPathStr
1667 50982 : _mdfd_segpath(SMgrRelation reln, ForkNumber forknum, BlockNumber segno)
1668 : {
1669 : RelPathStr path;
1670 : MdPathStr fullpath;
1671 :
1672 50982 : path = relpath(reln->smgr_rlocator, forknum);
1673 :
1674 50982 : if (segno > 0)
1675 50982 : sprintf(fullpath.str, "%s.%u", path.str, segno);
1676 : else
1677 0 : strcpy(fullpath.str, path.str);
1678 :
1679 50982 : return fullpath;
1680 : }
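 :
 : /*
 : * Editor's sketch (not part of md.c): what the naming convention above
 : * produces. For a relation whose base path is, say, "base/16384/16385",
 : * segment 0 uses the bare path and higher segments append ".<segno>"
 : * ("base/16384/16385.1", "base/16384/16385.2", ...). The helper name is
 : * hypothetical.
 : */
 : #ifdef MD_SEGPATH_EXAMPLE
 : static void
 : example_log_segment_paths(SMgrRelation reln, ForkNumber forknum)
 : {
 : 	/* segment 0: plain relation path */
 : 	elog(DEBUG1, "segment 0: %s", _mdfd_segpath(reln, forknum, 0).str);
 : 	/* segments 1 and up: path with ".<segno>" suffix */
 : 	elog(DEBUG1, "segment 1: %s", _mdfd_segpath(reln, forknum, 1).str);
 : }
 : #endif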
1681 :
1682 : /*
1683 : * Open the specified segment of the relation,
1684 : * and make a MdfdVec object for it. Returns NULL on failure.
1685 : */
1686 : static MdfdVec *
1687 50958 : _mdfd_openseg(SMgrRelation reln, ForkNumber forknum, BlockNumber segno,
1688 : int oflags)
1689 : {
1690 : MdfdVec *v;
1691 : File fd;
1692 : MdPathStr fullpath;
1693 :
1694 50958 : fullpath = _mdfd_segpath(reln, forknum, segno);
1695 :
1696 : /* open the file */
1697 50958 : fd = PathNameOpenFile(fullpath.str, _mdfd_open_flags() | oflags);
1698 :
1699 50958 : if (fd < 0)
1700 50958 : return NULL;
1701 :
1702 : /*
1703 : * Segments are always opened in order from lowest to highest, so we must
1704 : * be adding a new one at the end.
1705 : */
1706 : Assert(segno == reln->md_num_open_segs[forknum]);
1707 :
1708 0 : _fdvec_resize(reln, forknum, segno + 1);
1709 :
1710 : /* fill the entry */
1711 0 : v = &reln->md_seg_fds[forknum][segno];
1712 0 : v->mdfd_vfd = fd;
1713 0 : v->mdfd_segno = segno;
1714 :
1715 : Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE));
1716 :
1717 : /* all done */
1718 0 : return v;
1719 : }
1720 :
1721 : /*
1722 : * _mdfd_getseg() -- Find the segment of the relation holding the
1723 : * specified block.
1724 : *
1725 : * If the segment doesn't exist, we ereport, return NULL, or create the
1726 : * segment, according to "behavior". Note: skipFsync is only used in the
1727 : * EXTENSION_CREATE case.
1728 : */
1729 : static MdfdVec *
1730 5163224 : _mdfd_getseg(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
1731 : bool skipFsync, int behavior)
1732 : {
1733 : MdfdVec *v;
1734 : BlockNumber targetseg;
1735 : BlockNumber nextsegno;
1736 :
1737 : /* some way to handle non-existent segments needs to be specified */
1738 : Assert(behavior &
1739 : (EXTENSION_FAIL | EXTENSION_CREATE | EXTENSION_RETURN_NULL |
1740 : EXTENSION_DONT_OPEN));
1741 :
1742 5163224 : targetseg = blkno / ((BlockNumber) RELSEG_SIZE);
1743 :
1744 : /* if an existing and opened segment, we're done */
1745 5163224 : if (targetseg < reln->md_num_open_segs[forknum])
1746 : {
1747 4722036 : v = &reln->md_seg_fds[forknum][targetseg];
1748 4722036 : return v;
1749 : }
1750 :
1751 : /* The caller only wants the segment if we already had it open. */
1752 441188 : if (behavior & EXTENSION_DONT_OPEN)
1753 0 : return NULL;
1754 :
1755 : /*
1756 : * The target segment is not yet open. Iterate over all the segments
1757 : * between the last opened and the target segment. This way missing
1758 : * segments either raise an error, or get created (according to
1759 : * 'behavior'). Start with either the last opened, or the first segment if
1760 : * none was opened before.
1761 : */
1762 441188 : if (reln->md_num_open_segs[forknum] > 0)
1763 24 : v = &reln->md_seg_fds[forknum][reln->md_num_open_segs[forknum] - 1];
1764 : else
1765 : {
1766 441164 : v = mdopenfork(reln, forknum, behavior);
1767 441158 : if (!v)
1768 0 : return NULL; /* if behavior & EXTENSION_RETURN_NULL */
1769 : }
1770 :
1771 441182 : for (nextsegno = reln->md_num_open_segs[forknum];
1772 0 : nextsegno <= targetseg; nextsegno++)
1773 : {
1774 24 : BlockNumber nblocks = _mdnblocks(reln, forknum, v);
1775 24 : int flags = 0;
1776 :
1777 : Assert(nextsegno == v->mdfd_segno + 1);
1778 :
1779 24 : if (nblocks > ((BlockNumber) RELSEG_SIZE))
1780 0 : elog(FATAL, "segment too big");
1781 :
1782 24 : if ((behavior & EXTENSION_CREATE) ||
1783 24 : (InRecovery && (behavior & EXTENSION_CREATE_RECOVERY)))
1784 : {
1785 : /*
1786 : * Normally we will create new segments only if authorized by the
1787 : * caller (i.e., we are doing mdextend()). But when doing WAL
1788 : * recovery, create segments anyway; this allows cases such as
1789 : * replaying WAL data that has a write into a high-numbered
1790 : * segment of a relation that was later deleted. We want to go
1791 : * ahead and create the segments so we can finish out the replay.
1792 : *
1793 : * We have to maintain the invariant that segments before the last
1794 : * active segment are of size RELSEG_SIZE; therefore, if
1795 : * extending, pad them out with zeroes if needed. (This only
1796 : * matters if in recovery, or if the caller is extending the
1797 : * relation discontiguously, but that can happen in hash indexes.)
1798 : */
1799 0 : if (nblocks < ((BlockNumber) RELSEG_SIZE))
1800 : {
1801 0 : char *zerobuf = palloc_aligned(BLCKSZ, PG_IO_ALIGN_SIZE,
1802 : MCXT_ALLOC_ZERO);
1803 :
1804 0 : mdextend(reln, forknum,
1805 0 : nextsegno * ((BlockNumber) RELSEG_SIZE) - 1,
1806 : zerobuf, skipFsync);
1807 0 : pfree(zerobuf);
1808 : }
1809 0 : flags = O_CREAT;
1810 : }
1811 24 : else if (nblocks < ((BlockNumber) RELSEG_SIZE))
1812 : {
1813 : /*
1814 : * When not extending, only open the next segment if the current
1815 : * one is exactly RELSEG_SIZE. If not (this branch), either
1816 : * return NULL or fail.
1817 : */
1818 24 : if (behavior & EXTENSION_RETURN_NULL)
1819 : {
1820 : /*
1821 : * Some callers discern between reasons for _mdfd_getseg()
1822 : * returning NULL based on errno. As there's no failing
1823 : * syscall involved in this case, explicitly set errno to
1824 : * ENOENT, as that seems the closest interpretation.
1825 : */
1826 0 : errno = ENOENT;
1827 0 : return NULL;
1828 : }
1829 :
1830 24 : ereport(ERROR,
1831 : (errcode_for_file_access(),
1832 : errmsg("could not open file \"%s\" (target block %u): previous segment is only %u blocks",
1833 : _mdfd_segpath(reln, forknum, nextsegno).str,
1834 : blkno, nblocks)));
1835 : }
1836 :
1837 0 : v = _mdfd_openseg(reln, forknum, nextsegno, flags);
1838 :
1839 0 : if (v == NULL)
1840 : {
1841 0 : if ((behavior & EXTENSION_RETURN_NULL) &&
1842 0 : FILE_POSSIBLY_DELETED(errno))
1843 0 : return NULL;
1844 0 : ereport(ERROR,
1845 : (errcode_for_file_access(),
1846 : errmsg("could not open file \"%s\" (target block %u): %m",
1847 : _mdfd_segpath(reln, forknum, nextsegno).str,
1848 : blkno)));
1849 : }
1850 : }
1851 :
1852 441158 : return v;
1853 : }
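 :
 : /*
 : * Editor's sketch (not part of md.c): the typical caller pattern for
 : * _mdfd_getseg(), mirroring what this file's read/write paths do. The
 : * wrapper name is hypothetical; the EXTENSION_FAIL behavior and the
 : * offset arithmetic follow the conventions described above.
 : */
 : #ifdef MD_GETSEG_EXAMPLE
 : static File
 : example_locate_block(SMgrRelation reln, ForkNumber forknum,
 : 					 BlockNumber blocknum, off_t *seekpos)
 : {
 : 	/* raises ERROR if the containing segment does not exist */
 : 	MdfdVec    *v = _mdfd_getseg(reln, forknum, blocknum, false,
 : 								 EXTENSION_FAIL);
 :
 : 	/* byte position of the block within its segment file */
 : 	*seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
 :
 : 	return v->mdfd_vfd;
 : }
 : #endif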
1854 :
1855 : /*
1856 : * Get number of blocks present in a single disk file
1857 : */
1858 : static BlockNumber
1859 4842524 : _mdnblocks(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
1860 : {
1861 : off_t len;
1862 :
1863 4842524 : len = FileSize(seg->mdfd_vfd);
1864 4842524 : if (len < 0)
1865 0 : ereport(ERROR,
1866 : (errcode_for_file_access(),
1867 : errmsg("could not seek to end of file \"%s\": %m",
1868 : FilePathName(seg->mdfd_vfd))));
1869 : /* note that this calculation will ignore any partial block at EOF */
1870 4842524 : return (BlockNumber) (len / BLCKSZ);
1871 : }
1872 :
1873 : /*
1874 : * Sync a file to disk, given a file tag. Write the path into an output
1875 : * buffer so the caller can use it in error messages.
1876 : *
1877 : * Return 0 on success, -1 on failure, with errno set.
1878 : */
1879 : int
1880 0 : mdsyncfiletag(const FileTag *ftag, char *path)
1881 : {
1882 0 : SMgrRelation reln = smgropen(ftag->rlocator, INVALID_PROC_NUMBER);
1883 : File file;
1884 : instr_time io_start;
1885 : bool need_to_close;
1886 : int result,
1887 : save_errno;
1888 :
1889 : /* See if we already have the file open, or need to open it. */
1890 0 : if (ftag->segno < reln->md_num_open_segs[ftag->forknum])
1891 : {
1892 0 : file = reln->md_seg_fds[ftag->forknum][ftag->segno].mdfd_vfd;
1893 0 : strlcpy(path, FilePathName(file), MAXPGPATH);
1894 0 : need_to_close = false;
1895 : }
1896 : else
1897 : {
1898 : MdPathStr p;
1899 :
1900 0 : p = _mdfd_segpath(reln, ftag->forknum, ftag->segno);
1901 0 : strlcpy(path, p.str, MD_PATH_STR_MAXLEN);
1902 :
1903 0 : file = PathNameOpenFile(path, _mdfd_open_flags());
1904 0 : if (file < 0)
1905 0 : return -1;
1906 0 : need_to_close = true;
1907 : }
1908 :
1909 0 : io_start = pgstat_prepare_io_time(track_io_timing);
1910 :
1911 : /* Sync the file. */
1912 0 : result = FileSync(file, WAIT_EVENT_DATA_FILE_SYNC);
1913 0 : save_errno = errno;
1914 :
1915 0 : if (need_to_close)
1916 0 : FileClose(file);
1917 :
1918 0 : pgstat_count_io_op_time(IOOBJECT_RELATION, IOCONTEXT_NORMAL,
1919 : IOOP_FSYNC, io_start, 1, 0);
1920 :
1921 0 : errno = save_errno;
1922 0 : return result;
1923 : }
1924 :
1925 : /*
1926 : * Unlink a file, given a file tag. Write the path into an output
1927 : * buffer so the caller can use it in error messages.
1928 : *
1929 : * Return 0 on success, -1 on failure, with errno set.
1930 : */
1931 : int
1932 64820 : mdunlinkfiletag(const FileTag *ftag, char *path)
1933 : {
1934 : RelPathStr p;
1935 :
1936 : /* Compute the path. */
1937 64820 : p = relpathperm(ftag->rlocator, MAIN_FORKNUM);
1938 64820 : strlcpy(path, p.str, MAXPGPATH);
1939 :
1940 : /* Try to unlink the file. */
1941 64820 : return unlink(path);
1942 : }
1943 :
1944 : /*
1945 : * Check if a given candidate request matches a given tag, when processing
1946 : * a SYNC_FILTER_REQUEST request. This will be called for all pending
1947 : * requests to find out whether to forget them.
1948 : */
1949 : bool
1950 14086 : mdfiletagmatches(const FileTag *ftag, const FileTag *candidate)
1951 : {
1952 : /*
1953 : * For now we only use filter requests as a way to drop all scheduled
1954 : * callbacks relating to a given database, when dropping the database.
1955 : * We'll return true for all candidates that have the same database OID as
1956 : * the ftag from the SYNC_FILTER_REQUEST request, so they're forgotten.
1957 : */
1958 14086 : return ftag->rlocator.dbOid == candidate->rlocator.dbOid;
1959 : }
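 :
 : /*
 : * Editor's sketch (not part of md.c): how a SYNC_FILTER_REQUEST tag built
 : * by ForgetDatabaseSyncRequests() relates to mdfiletagmatches(). The
 : * helper below is hypothetical; it only demonstrates that matching is by
 : * database OID alone.
 : */
 : #ifdef MD_FILTER_EXAMPLE
 : static bool
 : example_would_forget(Oid dbid, const FileTag *pending)
 : {
 : 	FileTag		filter;
 : 	RelFileLocator rlocator;
 :
 : 	rlocator.spcOid = 0;
 : 	rlocator.dbOid = dbid;
 : 	rlocator.relNumber = 0;
 :
 : 	INIT_MD_FILETAG(filter, rlocator, InvalidForkNumber, InvalidBlockNumber);
 :
 : 	/* true => the pending request for this database would be forgotten */
 : 	return mdfiletagmatches(&filter, pending);
 : }
 : #endif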
1960 :
1961 : /*
1962 : * AIO completion callback for mdstartreadv().
1963 : */
1964 : static PgAioResult
1965 2255710 : md_readv_complete(PgAioHandle *ioh, PgAioResult prior_result, uint8 cb_data)
1966 : {
1967 2255710 : PgAioTargetData *td = pgaio_io_get_target_data(ioh);
1968 2255710 : PgAioResult result = prior_result;
1969 :
1970 2255710 : if (prior_result.result < 0)
1971 : {
1972 14 : result.status = PGAIO_RS_ERROR;
1973 14 : result.id = PGAIO_HCB_MD_READV;
1974 : /* For "hard" errors, track the error number in error_data */
1975 14 : result.error_data = -prior_result.result;
1976 14 : result.result = 0;
1977 :
1978 : /*
1979 : * Immediately log a message about the IO error, but only to the
1980 : * server log. The reason to do so immediately is that the originator
1981 : * might not process the query result immediately (because it is busy
1982 : * doing another part of query processing) or at all (e.g. if it was
1983 : * cancelled or errored out due to another IO also failing). The
1984 : * definer of the IO will emit an ERROR when processing the IO's
1985 : * results
1986 : */
1987 14 : pgaio_result_report(result, td, LOG_SERVER_ONLY);
1988 :
1989 14 : return result;
1990 : }
1991 :
1992 : /*
1993 : * As explained above smgrstartreadv(), the smgr API operates on the level
1994 : * of blocks, rather than bytes. Convert.
1995 : */
1996 2255696 : result.result /= BLCKSZ;
1997 :
1998 : Assert(result.result <= td->smgr.nblocks);
1999 :
2000 2255696 : if (result.result == 0)
2001 : {
2002 : /* consider 0 blocks read a failure */
2003 4 : result.status = PGAIO_RS_ERROR;
2004 4 : result.id = PGAIO_HCB_MD_READV;
2005 4 : result.error_data = 0;
2006 :
2007 : /* see comment above the "hard error" case */
2008 4 : pgaio_result_report(result, td, LOG_SERVER_ONLY);
2009 :
2010 4 : return result;
2011 : }
2012 :
2013 2255692 : if (result.status != PGAIO_RS_ERROR &&
2014 2255692 : result.result < td->smgr.nblocks)
2015 : {
2016 : /* partial reads should be retried at upper level */
2017 24 : result.status = PGAIO_RS_PARTIAL;
2018 24 : result.id = PGAIO_HCB_MD_READV;
2019 : }
2020 :
2021 2255692 : return result;
2022 : }
2023 :
2024 : /*
2025 : * AIO error reporting callback for mdstartreadv().
2026 : *
2027 : * Errors are encoded as follows:
2028 : * - PgAioResult.error_data != 0 encodes IO that failed with that errno
2029 : * - PgAioResult.error_data == 0 encodes IO that didn't read all data
2030 : */
2031 : static void
2032 56 : md_readv_report(PgAioResult result, const PgAioTargetData *td, int elevel)
2033 : {
2034 : RelPathStr path;
2035 :
2036 56 : path = relpathbackend(td->smgr.rlocator,
2037 : td->smgr.is_temp ? MyProcNumber : INVALID_PROC_NUMBER,
2038 : td->smgr.forkNum);
2039 :
2040 56 : if (result.error_data != 0)
2041 : {
2042 : /* for errcode_for_file_access() and %m */
2043 28 : errno = result.error_data;
2044 :
2045 28 : ereport(elevel,
2046 : errcode_for_file_access(),
2047 : errmsg("could not read blocks %u..%u in file \"%s\": %m",
2048 : td->smgr.blockNum,
2049 : td->smgr.blockNum + td->smgr.nblocks - 1,
2050 : path.str));
2051 : }
2052 : else
2053 : {
2054 : /*
2055 : * NB: This will typically only be output in debug messages, while
2056 : * retrying a partial IO.
2057 : */
2058 28 : ereport(elevel,
2059 : errcode(ERRCODE_DATA_CORRUPTED),
2060 : errmsg("could not read blocks %u..%u in file \"%s\": read only %zu of %zu bytes",
2061 : td->smgr.blockNum,
2062 : td->smgr.blockNum + td->smgr.nblocks - 1,
2063 : path.str,
2064 : result.result * (size_t) BLCKSZ,
2065 : td->smgr.nblocks * (size_t) BLCKSZ));
2066 : }
2067 38 : }