Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * md.c
4 : * This code manages relations that reside on magnetic disk.
5 : *
6 : * Or at least, that was what the Berkeley folk had in mind when they named
7 : * this file. In reality, what this code provides is an interface from
8 : * the smgr API to Unix-like filesystem APIs, so it will work with any type
9 : * of device for which the operating system provides filesystem support.
10 : * It doesn't matter whether the bits are on spinning rust or some other
11 : * storage technology.
12 : *
13 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
14 : * Portions Copyright (c) 1994, Regents of the University of California
15 : *
16 : *
17 : * IDENTIFICATION
18 : * src/backend/storage/smgr/md.c
19 : *
20 : *-------------------------------------------------------------------------
21 : */
22 : #include "postgres.h"
23 :
24 : #include <unistd.h>
25 : #include <fcntl.h>
26 : #include <sys/file.h>
27 :
28 : #include "access/xlogutils.h"
29 : #include "commands/tablespace.h"
30 : #include "common/file_utils.h"
31 : #include "miscadmin.h"
32 : #include "pg_trace.h"
33 : #include "pgstat.h"
34 : #include "storage/aio.h"
35 : #include "storage/bufmgr.h"
36 : #include "storage/fd.h"
37 : #include "storage/md.h"
38 : #include "storage/relfilelocator.h"
39 : #include "storage/smgr.h"
40 : #include "storage/sync.h"
41 : #include "utils/memutils.h"
42 :
43 : /*
44 : * The magnetic disk storage manager keeps track of open file
45 : * descriptors in its own descriptor pool. This is done to make it
46 : * easier to support relations that are larger than the operating
47 : * system's file size limit (often 2GBytes). In order to do that,
48 : * we break relations up into "segment" files that are each shorter than
49 : * the OS file size limit. The segment size is set by the RELSEG_SIZE
50 : * configuration constant in pg_config.h.
51 : *
52 : * On disk, a relation must consist of consecutively numbered segment
53 : * files in the pattern
54 : * -- Zero or more full segments of exactly RELSEG_SIZE blocks each
55 : * -- Exactly one partial segment of size 0 <= size < RELSEG_SIZE blocks
56 : * -- Optionally, any number of inactive segments of size 0 blocks.
57 : * The full and partial segments are collectively the "active" segments.
58 : * Inactive segments are those that once contained data but are currently
59 : * not needed because of an mdtruncate() operation. The reason for leaving
60 : * them present at size zero, rather than unlinking them, is that other
61 : * backends and/or the checkpointer might be holding open file references to
62 : * such segments. If the relation expands again after mdtruncate(), such
63 : * that a deactivated segment becomes active again, it is important that
64 : * such file references still be valid --- else data might get written
65 : * out to an unlinked old copy of a segment file that will eventually
66 : * disappear.
67 : *
68 : * File descriptors are stored in the per-fork md_seg_fds arrays inside
69 : * SMgrRelation. The length of these arrays is stored in md_num_open_segs.
70 : * Note that a fork's md_num_open_segs having a specific value does not
71 : * necessarily mean the relation doesn't have additional segments; we may
72 : * just not have opened the next segment yet. (We could not have "all
73 : * segments are in the array" as an invariant anyway, since another backend
74 : * could extend the relation while we aren't looking.) We do not have
75 : * entries for inactive segments, however; as soon as we find a partial
76 : * segment, we assume that any subsequent segments are inactive.
77 : *
78 : * The entire MdfdVec array is palloc'd in the MdCxt memory context.
79 : */
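An editor's illustrative sketch (not part of the real source) of the block-number arithmetic this file uses throughout: a relation-wide block number determines both which segment file holds the block and the byte offset within that segment. Only the standard BLCKSZ and RELSEG_SIZE build constants are assumed.

/*
 * Illustrative sketch only: segno selects the ".segno" filename suffix
 * (segment 0 is the unsuffixed base file); seekpos is the byte offset
 * later passed to FileReadV()/FileWriteV() within that segment.
 */
static inline void
block_to_segment(BlockNumber blocknum, BlockNumber *segno, off_t *seekpos)
{
	*segno = blocknum / ((BlockNumber) RELSEG_SIZE);
	*seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
}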
80 :
81 : typedef struct _MdfdVec
82 : {
83 : File mdfd_vfd; /* fd number in fd.c's pool */
84 : BlockNumber mdfd_segno; /* segment number, from 0 */
85 : } MdfdVec;
86 :
87 : static MemoryContext MdCxt; /* context for all MdfdVec objects */
88 :
89 :
90 : /* Populate a file tag describing an md.c segment file. */
91 : #define INIT_MD_FILETAG(a,xx_rlocator,xx_forknum,xx_segno) \
92 : ( \
93 : memset(&(a), 0, sizeof(FileTag)), \
94 : (a).handler = SYNC_HANDLER_MD, \
95 : (a).rlocator = (xx_rlocator), \
96 : (a).forknum = (xx_forknum), \
97 : (a).segno = (xx_segno) \
98 : )
99 :
100 :
101 : /*** behavior for mdopen & _mdfd_getseg ***/
102 : /* ereport if segment not present */
103 : #define EXTENSION_FAIL (1 << 0)
104 : /* return NULL if segment not present */
105 : #define EXTENSION_RETURN_NULL (1 << 1)
106 : /* create new segments as needed */
107 : #define EXTENSION_CREATE (1 << 2)
108 : /* create new segments if needed during recovery */
109 : #define EXTENSION_CREATE_RECOVERY (1 << 3)
110 : /* don't try to open a segment if it is not already open */
111 : #define EXTENSION_DONT_OPEN (1 << 5)
112 :
113 :
114 : /*
115 : * Fixed-length string to represent paths to files that need to be built by
116 : * md.c.
117 : *
118 : * The maximum number of segments is MaxBlockNumber / RELSEG_SIZE, where
119 : * RELSEG_SIZE can be set to 1 (for testing only).
120 : */
121 : #define SEGMENT_CHARS OIDCHARS
122 : #define MD_PATH_STR_MAXLEN \
123 : (\
124 : REL_PATH_STR_MAXLEN \
125 : + sizeof((char)'.') \
126 : + SEGMENT_CHARS \
127 : )
128 : typedef struct MdPathStr
129 : {
130 : char str[MD_PATH_STR_MAXLEN + 1];
131 : } MdPathStr;
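A worked expansion (editor's note): sizeof((char) '.') is 1, and SEGMENT_CHARS is OIDCHARS, which is 10 assuming 32-bit segment numbers (the largest possible segment number, MaxBlockNumber with RELSEG_SIZE set to 1, has 10 decimal digits). So MD_PATH_STR_MAXLEN works out to REL_PATH_STR_MAXLEN + 1 + 10, and MdPathStr.str reserves one further byte for the terminating NUL.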
132 :
133 :
134 : /* local routines */
135 : static void mdunlinkfork(RelFileLocatorBackend rlocator, ForkNumber forknum,
136 : bool isRedo);
137 : static MdfdVec *mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior);
138 : static void register_dirty_segment(SMgrRelation reln, ForkNumber forknum,
139 : MdfdVec *seg);
140 : static void register_unlink_segment(RelFileLocatorBackend rlocator, ForkNumber forknum,
141 : BlockNumber segno);
142 : static void register_forget_request(RelFileLocatorBackend rlocator, ForkNumber forknum,
143 : BlockNumber segno);
144 : static void _fdvec_resize(SMgrRelation reln,
145 : ForkNumber forknum,
146 : int nseg);
147 : static MdPathStr _mdfd_segpath(SMgrRelation reln, ForkNumber forknum,
148 : BlockNumber segno);
149 : static MdfdVec *_mdfd_openseg(SMgrRelation reln, ForkNumber forknum,
150 : BlockNumber segno, int oflags);
151 : static MdfdVec *_mdfd_getseg(SMgrRelation reln, ForkNumber forknum,
152 : BlockNumber blkno, bool skipFsync, int behavior);
153 : static BlockNumber _mdnblocks(SMgrRelation reln, ForkNumber forknum,
154 : MdfdVec *seg);
155 :
156 : static PgAioResult md_readv_complete(PgAioHandle *ioh, PgAioResult prior_result, uint8 cb_data);
157 : static void md_readv_report(PgAioResult result, const PgAioTargetData *target_data, int elevel);
158 :
159 : const PgAioHandleCallbacks aio_md_readv_cb = {
160 : .complete_shared = md_readv_complete,
161 : .report = md_readv_report,
162 : };
163 :
164 :
165 : static inline int
166 2615848 : _mdfd_open_flags(void)
167 : {
168 2615848 : int flags = O_RDWR | PG_BINARY;
169 :
170 2615848 : if (io_direct_flags & IO_DIRECT_DATA)
171 614 : flags |= PG_O_DIRECT;
172 :
173 2615848 : return flags;
174 : }
175 :
176 : /*
177 : * mdinit() -- Initialize private state for magnetic disk storage manager.
178 : */
179 : void
180 42286 : mdinit(void)
181 : {
182 42286 : MdCxt = AllocSetContextCreate(TopMemoryContext,
183 : "MdSmgr",
184 : ALLOCSET_DEFAULT_SIZES);
185 42286 : }
186 :
187 : /*
188 : * mdexists() -- Does the physical file exist?
189 : *
190 : * Note: this will return true for lingering files, with pending deletions
191 : */
192 : bool
193 1085918 : mdexists(SMgrRelation reln, ForkNumber forknum)
194 : {
195 : /*
196 : * Close it first, to ensure that we notice if the fork has been unlinked
197 : * since we opened it. As an optimization, we can skip that in recovery,
198 : * which already closes relations when dropping them.
199 : */
200 1085918 : if (!InRecovery)
201 1044846 : mdclose(reln, forknum);
202 :
203 1085918 : return (mdopenfork(reln, forknum, EXTENSION_RETURN_NULL) != NULL);
204 : }
205 :
206 : /*
207 : * mdcreate() -- Create a new relation on magnetic disk.
208 : *
209 : * If isRedo is true, it's okay for the relation to exist already.
210 : */
211 : void
212 11151704 : mdcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
213 : {
214 : MdfdVec *mdfd;
215 : RelPathStr path;
216 : File fd;
217 :
218 11151704 : if (isRedo && reln->md_num_open_segs[forknum] > 0)
219 10848190 : return; /* created and opened already... */
220 :
221 : Assert(reln->md_num_open_segs[forknum] == 0);
222 :
223 : /*
224 : * We may be using the target table space for the first time in this
225 : * database, so create a per-database subdirectory if needed.
226 : *
227 : * XXX this is a fairly ugly violation of module layering, but this seems
228 : * to be the best place to put the check. Maybe TablespaceCreateDbspace
229 : * should be here and not in commands/tablespace.c? But that would imply
230 : * importing a lot of stuff that smgr.c oughtn't know, either.
231 : */
232 303514 : TablespaceCreateDbspace(reln->smgr_rlocator.locator.spcOid,
233 : reln->smgr_rlocator.locator.dbOid,
234 : isRedo);
235 :
236 303514 : path = relpath(reln->smgr_rlocator, forknum);
237 :
238 303514 : fd = PathNameOpenFile(path.str, _mdfd_open_flags() | O_CREAT | O_EXCL);
239 :
240 303514 : if (fd < 0)
241 : {
242 8802 : int save_errno = errno;
243 :
244 8802 : if (isRedo)
245 8802 : fd = PathNameOpenFile(path.str, _mdfd_open_flags());
246 8802 : if (fd < 0)
247 : {
248 : /* be sure to report the error reported by create, not open */
249 0 : errno = save_errno;
250 0 : ereport(ERROR,
251 : (errcode_for_file_access(),
252 : errmsg("could not create file \"%s\": %m", path.str)));
253 : }
254 : }
255 :
256 303514 : _fdvec_resize(reln, forknum, 1);
257 303514 : mdfd = &reln->md_seg_fds[forknum][0];
258 303514 : mdfd->mdfd_vfd = fd;
259 303514 : mdfd->mdfd_segno = 0;
260 :
261 303514 : if (!SmgrIsTemp(reln))
262 297080 : register_dirty_segment(reln, forknum, mdfd);
263 : }
264 :
265 : /*
266 : * mdunlink() -- Unlink a relation.
267 : *
268 : * Note that we're passed a RelFileLocatorBackend --- by the time this is called,
269 : * there won't be an SMgrRelation hashtable entry anymore.
270 : *
271 : * forknum can be a fork number to delete a specific fork, or InvalidForkNumber
272 : * to delete all forks.
273 : *
274 : * For regular relations, we don't unlink the first segment file of the rel,
275 : * but just truncate it to zero length, and record a request to unlink it after
276 : * the next checkpoint. Additional segments can be unlinked immediately,
277 : * however. Leaving the empty file in place prevents that relfilenumber
278 : * from being reused. The scenario this protects us from is:
279 : * 1. We delete a relation (and commit, and actually remove its file).
280 : * 2. We create a new relation, which by chance gets the same relfilenumber as
281 : * the just-deleted one (OIDs must've wrapped around for that to happen).
282 : * 3. We crash before another checkpoint occurs.
283 : * During replay, we would delete the file and then recreate it, which is fine
284 : * if the contents of the file were repopulated by subsequent WAL entries.
285 : * But if we didn't WAL-log insertions, but instead relied on fsyncing the
286 : * file after populating it (as we do at wal_level=minimal), the contents of
287 : * the file would be lost forever. By leaving the empty file until after the
288 : * next checkpoint, we prevent reassignment of the relfilenumber until it's
289 : * safe, because relfilenumber assignment skips over any existing file.
290 : *
291 : * Additional segments, if any, are truncated and then unlinked. The reason
292 : * for truncating is that other backends may still hold open FDs for these at
293 : * the smgr level, so that the kernel can't remove the file yet. We want to
294 : * reclaim the disk space right away despite that.
295 : *
296 : * We do not need to go through this dance for temp relations, though, because
297 : * we never make WAL entries for temp rels, and so a temp rel poses no threat
298 : * to the health of a regular rel that has taken over its relfilenumber.
299 : * The fact that temp rels and regular rels have different file naming
300 : * patterns provides additional safety. Other backends shouldn't have open
301 : * FDs for them, either.
302 : *
303 : * We also don't do it while performing a binary upgrade. There is no reuse
304 : * hazard in that case, since after a crash or even a simple ERROR, the
305 : * upgrade fails and the whole cluster must be recreated from scratch.
306 : * Furthermore, it is important to remove the files from disk immediately,
307 : * because we may be about to reuse the same relfilenumber.
308 : *
309 : * All the above applies only to the relation's main fork; other forks can
310 : * just be removed immediately, since they are not needed to prevent the
311 : * relfilenumber from being recycled. Also, we do not carefully
312 : * track whether other forks have been created or not, but just attempt to
313 : * unlink them unconditionally; so we should never complain about ENOENT.
314 : *
315 : * If isRedo is true, it's unsurprising for the relation to be already gone.
316 : * Also, we should remove the file immediately instead of queuing a request
317 : * for later, since during redo there's no possibility of creating a
318 : * conflicting relation.
319 : *
320 : * Note: we currently just never warn about ENOENT at all. We could warn in
321 : * the main-fork, non-isRedo case, but it doesn't seem worth the trouble.
322 : *
323 : * Note: any failure should be reported as WARNING not ERROR, because
324 : * we are usually not in a transaction anymore when this is called.
325 : */
326 : void
327 358376 : mdunlink(RelFileLocatorBackend rlocator, ForkNumber forknum, bool isRedo)
328 : {
329 : /* Now do the per-fork work */
330 358376 : if (forknum == InvalidForkNumber)
331 : {
332 0 : for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
333 0 : mdunlinkfork(rlocator, forknum, isRedo);
334 : }
335 : else
336 358376 : mdunlinkfork(rlocator, forknum, isRedo);
337 358376 : }
338 :
339 : /*
340 : * Truncate a file to release disk space.
341 : */
342 : static int
343 420932 : do_truncate(const char *path)
344 : {
345 : int save_errno;
346 : int ret;
347 :
348 420932 : ret = pg_truncate(path, 0);
349 :
350 : /* Log a warning here to avoid repetition in callers. */
351 420932 : if (ret < 0 && errno != ENOENT)
352 : {
353 0 : save_errno = errno;
354 0 : ereport(WARNING,
355 : (errcode_for_file_access(),
356 : errmsg("could not truncate file \"%s\": %m", path)));
357 0 : errno = save_errno;
358 : }
359 :
360 420932 : return ret;
361 : }
362 :
363 : static void
364 358376 : mdunlinkfork(RelFileLocatorBackend rlocator, ForkNumber forknum, bool isRedo)
365 : {
366 : RelPathStr path;
367 : int ret;
368 : int save_errno;
369 :
370 358376 : path = relpath(rlocator, forknum);
371 :
372 : /*
373 : * Truncate and then unlink the first segment, or just register a request
374 : * to unlink it later, as described in the comments for mdunlink().
375 : */
376 358376 : if (isRedo || IsBinaryUpgrade || forknum != MAIN_FORKNUM ||
377 74602 : RelFileLocatorBackendIsTemp(rlocator))
378 : {
379 289902 : if (!RelFileLocatorBackendIsTemp(rlocator))
380 : {
381 : /* Prevent other backends' fds from holding on to the disk space */
382 265390 : ret = do_truncate(path.str);
383 :
384 : /* Forget any pending sync requests for the first segment */
385 265390 : save_errno = errno;
386 265390 : register_forget_request(rlocator, forknum, 0 /* first seg */ );
387 265390 : errno = save_errno;
388 : }
389 : else
390 24512 : ret = 0;
391 :
392 : /* Next unlink the file, unless it was already found to be missing */
393 289902 : if (ret >= 0 || errno != ENOENT)
394 : {
395 43114 : ret = unlink(path.str);
396 43114 : if (ret < 0 && errno != ENOENT)
397 : {
398 0 : save_errno = errno;
399 0 : ereport(WARNING,
400 : (errcode_for_file_access(),
401 : errmsg("could not remove file \"%s\": %m", path.str)));
402 0 : errno = save_errno;
403 : }
404 : }
405 : }
406 : else
407 : {
408 : /* Prevent other backends' fds from holding on to the disk space */
409 68474 : ret = do_truncate(path.str);
410 :
411 : /* Register request to unlink first segment later */
412 68474 : save_errno = errno;
413 68474 : register_unlink_segment(rlocator, forknum, 0 /* first seg */ );
414 68474 : errno = save_errno;
415 : }
416 :
417 : /*
418 : * Delete any additional segments.
419 : *
420 : * Note that because we loop until getting ENOENT, we will correctly
421 : * remove all inactive segments as well as active ones. Ideally we'd
422 : * continue the loop until getting exactly that errno, but that risks an
423 : * infinite loop if the problem is directory-wide (for instance, if we
424 : * suddenly can't read the data directory itself). We compromise by
425 : * continuing after a non-ENOENT truncate error, but stopping after any
426 : * unlink error. If there is indeed a directory-wide problem, additional
427 : * unlink attempts wouldn't work anyway.
428 : */
429 358376 : if (ret >= 0 || errno != ENOENT)
430 : {
431 : MdPathStr segpath;
432 : BlockNumber segno;
433 :
434 93502 : for (segno = 1;; segno++)
435 : {
436 93502 : sprintf(segpath.str, "%s.%u", path.str, segno);
437 :
438 93502 : if (!RelFileLocatorBackendIsTemp(rlocator))
439 : {
440 : /*
441 : * Prevent other backends' fds from holding on to the disk
442 : * space. We're done if we see ENOENT, though.
443 : */
444 87068 : if (do_truncate(segpath.str) < 0 && errno == ENOENT)
445 87068 : break;
446 :
447 : /*
448 : * Forget any pending sync requests for this segment before we
449 : * try to unlink.
450 : */
451 0 : register_forget_request(rlocator, forknum, segno);
452 : }
453 :
454 6434 : if (unlink(segpath.str) < 0)
455 : {
456 : /* ENOENT is expected after the last segment... */
457 6434 : if (errno != ENOENT)
458 0 : ereport(WARNING,
459 : (errcode_for_file_access(),
460 : errmsg("could not remove file \"%s\": %m", segpath.str)));
461 6434 : break;
462 : }
463 : }
464 : }
465 358376 : }
466 :
467 : /*
468 : * mdextend() -- Add a block to the specified relation.
469 : *
470 : * The semantics are nearly the same as mdwrite(): write at the
471 : * specified position. However, this is to be used for the case of
472 : * extending a relation (i.e., blocknum is at or beyond the current
473 : * EOF). Note that we assume writing a block beyond current EOF
474 : * causes intervening file space to become filled with zeroes.
475 : */
476 : void
477 227266 : mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
478 : const void *buffer, bool skipFsync)
479 : {
480 : off_t seekpos;
481 : int nbytes;
482 : MdfdVec *v;
483 :
484 : /* If this build supports direct I/O, the buffer must be I/O aligned. */
485 : if (PG_O_DIRECT != 0 && PG_IO_ALIGN_SIZE <= BLCKSZ)
486 : Assert((uintptr_t) buffer == TYPEALIGN(PG_IO_ALIGN_SIZE, buffer));
487 :
488 : /* This assert is too expensive to have on normally ... */
489 : #ifdef CHECK_WRITE_VS_EXTEND
490 : Assert(blocknum >= mdnblocks(reln, forknum));
491 : #endif
492 :
493 : /*
494 : * If a relation manages to grow to 2^32-1 blocks, refuse to extend it any
495 : * more --- we mustn't create a block whose number actually is
496 : * InvalidBlockNumber. (Note that this failure should be unreachable
497 : * because of upstream checks in bufmgr.c.)
498 : */
499 227266 : if (blocknum == InvalidBlockNumber)
500 0 : ereport(ERROR,
501 : (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
502 : errmsg("cannot extend file \"%s\" beyond %u blocks",
503 : relpath(reln->smgr_rlocator, forknum).str,
504 : InvalidBlockNumber)));
505 :
506 227266 : v = _mdfd_getseg(reln, forknum, blocknum, skipFsync, EXTENSION_CREATE);
507 :
508 227266 : seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
509 :
510 : Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
511 :
512 227266 : if ((nbytes = FileWrite(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_EXTEND)) != BLCKSZ)
513 : {
514 0 : if (nbytes < 0)
515 0 : ereport(ERROR,
516 : (errcode_for_file_access(),
517 : errmsg("could not extend file \"%s\": %m",
518 : FilePathName(v->mdfd_vfd)),
519 : errhint("Check free disk space.")));
520 : /* short write: complain appropriately */
521 0 : ereport(ERROR,
522 : (errcode(ERRCODE_DISK_FULL),
523 : errmsg("could not extend file \"%s\": wrote only %d of %d bytes at block %u",
524 : FilePathName(v->mdfd_vfd),
525 : nbytes, BLCKSZ, blocknum),
526 : errhint("Check free disk space.")));
527 : }
528 :
529 227266 : if (!skipFsync && !SmgrIsTemp(reln))
530 58 : register_dirty_segment(reln, forknum, v);
531 :
532 : Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE));
533 227266 : }
534 :
535 : /*
536 : * mdzeroextend() -- Add new zeroed out blocks to the specified relation.
537 : *
538 : * Similar to mdextend(), except the relation can be extended by multiple
539 : * blocks at once and the added blocks will be filled with zeroes.
540 : */
541 : void
542 407346 : mdzeroextend(SMgrRelation reln, ForkNumber forknum,
543 : BlockNumber blocknum, int nblocks, bool skipFsync)
544 : {
545 : MdfdVec *v;
546 407346 : BlockNumber curblocknum = blocknum;
547 407346 : int remblocks = nblocks;
548 :
549 : Assert(nblocks > 0);
550 :
551 : /* This assert is too expensive to have on normally ... */
552 : #ifdef CHECK_WRITE_VS_EXTEND
553 : Assert(blocknum >= mdnblocks(reln, forknum));
554 : #endif
555 :
556 : /*
557 : * If a relation manages to grow to 2^32-1 blocks, refuse to extend it any
558 : * more --- we mustn't create a block whose number actually is
559 : * InvalidBlockNumber or larger.
560 : */
561 407346 : if ((uint64) blocknum + nblocks >= (uint64) InvalidBlockNumber)
562 0 : ereport(ERROR,
563 : (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
564 : errmsg("cannot extend file \"%s\" beyond %u blocks",
565 : relpath(reln->smgr_rlocator, forknum).str,
566 : InvalidBlockNumber)));
567 :
568 814692 : while (remblocks > 0)
569 : {
570 407346 : BlockNumber segstartblock = curblocknum % ((BlockNumber) RELSEG_SIZE);
571 407346 : off_t seekpos = (off_t) BLCKSZ * segstartblock;
572 : int numblocks;
573 :
574 407346 : if (segstartblock + remblocks > RELSEG_SIZE)
575 0 : numblocks = RELSEG_SIZE - segstartblock;
576 : else
577 407346 : numblocks = remblocks;
578 :
579 407346 : v = _mdfd_getseg(reln, forknum, curblocknum, skipFsync, EXTENSION_CREATE);
580 :
581 : Assert(segstartblock < RELSEG_SIZE);
582 : Assert(segstartblock + numblocks <= RELSEG_SIZE);
583 :
584 : /*
585 : * If available and useful, use posix_fallocate() (via
586 : * FileFallocate()) to extend the relation. That's often more
587 : * efficient than using write(), as it commonly won't cause the kernel
588 : * to allocate page cache space for the extended pages.
589 : *
590 : * However, we don't use FileFallocate() for small extensions, as it
591 : * defeats delayed allocation on some filesystems. Not clear where
592 : * that decision should be made though? For now just use a cutoff of
593 : * 8, anything between 4 and 8 worked OK in some local testing.
594 : */
595 407346 : if (numblocks > 8)
596 : {
597 : int ret;
598 :
599 1000 : ret = FileFallocate(v->mdfd_vfd,
600 : seekpos, (off_t) BLCKSZ * numblocks,
601 : WAIT_EVENT_DATA_FILE_EXTEND);
602 1000 : if (ret != 0)
603 : {
604 0 : ereport(ERROR,
605 : errcode_for_file_access(),
606 : errmsg("could not extend file \"%s\" with FileFallocate(): %m",
607 : FilePathName(v->mdfd_vfd)),
608 : errhint("Check free disk space."));
609 : }
610 : }
611 : else
612 : {
613 : int ret;
614 :
615 : /*
616 : * Even if we don't want to use fallocate, we can still extend a
617 : * bit more efficiently than writing each 8kB block individually.
618 : * pg_pwrite_zeros() (via FileZero()) uses pg_pwritev_with_retry()
619 : * to avoid multiple writes or needing a zeroed buffer for the
620 : * whole length of the extension.
621 : */
622 406346 : ret = FileZero(v->mdfd_vfd,
623 : seekpos, (off_t) BLCKSZ * numblocks,
624 : WAIT_EVENT_DATA_FILE_EXTEND);
625 406346 : if (ret < 0)
626 0 : ereport(ERROR,
627 : errcode_for_file_access(),
628 : errmsg("could not extend file \"%s\": %m",
629 : FilePathName(v->mdfd_vfd)),
630 : errhint("Check free disk space."));
631 : }
632 :
633 407346 : if (!skipFsync && !SmgrIsTemp(reln))
634 387638 : register_dirty_segment(reln, forknum, v);
635 :
636 : Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE));
637 :
638 407346 : remblocks -= numblocks;
639 407346 : curblocknum += numblocks;
640 : }
641 407346 : }
642 :
643 : /*
644 : * mdopenfork() -- Open one fork of the specified relation.
645 : *
646 : * Note we only open the first segment, when there are multiple segments.
647 : *
648 : * If first segment is not present, either ereport or return NULL according
649 : * to "behavior". We treat EXTENSION_CREATE the same as EXTENSION_FAIL;
650 : * EXTENSION_CREATE means it's OK to extend an existing relation, not to
651 : * invent one out of whole cloth.
652 : */
653 : static MdfdVec *
654 6751272 : mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior)
655 : {
656 : MdfdVec *mdfd;
657 : RelPathStr path;
658 : File fd;
659 :
660 : /* No work if already open */
661 6751272 : if (reln->md_num_open_segs[forknum] > 0)
662 4494814 : return &reln->md_seg_fds[forknum][0];
663 :
664 2256458 : path = relpath(reln->smgr_rlocator, forknum);
665 :
666 2256458 : fd = PathNameOpenFile(path.str, _mdfd_open_flags());
667 :
668 2256458 : if (fd < 0)
669 : {
670 730956 : if ((behavior & EXTENSION_RETURN_NULL) &&
671 730912 : FILE_POSSIBLY_DELETED(errno))
672 730912 : return NULL;
673 44 : ereport(ERROR,
674 : (errcode_for_file_access(),
675 : errmsg("could not open file \"%s\": %m", path.str)));
676 : }
677 :
678 1525502 : _fdvec_resize(reln, forknum, 1);
679 1525502 : mdfd = &reln->md_seg_fds[forknum][0];
680 1525502 : mdfd->mdfd_vfd = fd;
681 1525502 : mdfd->mdfd_segno = 0;
682 :
683 : Assert(_mdnblocks(reln, forknum, mdfd) <= ((BlockNumber) RELSEG_SIZE));
684 :
685 1525502 : return mdfd;
686 : }
687 :
688 : /*
689 : * mdopen() -- Initialize newly-opened relation.
690 : */
691 : void
692 2174434 : mdopen(SMgrRelation reln)
693 : {
694 : /* mark it not open */
695 10872170 : for (int forknum = 0; forknum <= MAX_FORKNUM; forknum++)
696 8697736 : reln->md_num_open_segs[forknum] = 0;
697 2174434 : }
698 :
699 : /*
700 : * mdclose() -- Close the specified relation, if it isn't closed already.
701 : */
702 : void
703 6959238 : mdclose(SMgrRelation reln, ForkNumber forknum)
704 : {
705 6959238 : int nopensegs = reln->md_num_open_segs[forknum];
706 :
707 : /* No work if already closed */
708 6959238 : if (nopensegs == 0)
709 5929744 : return;
710 :
711 : /* close segments starting from the end */
712 2058988 : while (nopensegs > 0)
713 : {
714 1029494 : MdfdVec *v = &reln->md_seg_fds[forknum][nopensegs - 1];
715 :
716 1029494 : FileClose(v->mdfd_vfd);
717 1029494 : _fdvec_resize(reln, forknum, nopensegs - 1);
718 1029494 : nopensegs--;
719 : }
720 : }
721 :
722 : /*
723 : * mdprefetch() -- Initiate asynchronous read of the specified blocks of a relation
724 : */
725 : bool
726 16800 : mdprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
727 : int nblocks)
728 : {
729 : #ifdef USE_PREFETCH
730 :
731 : Assert((io_direct_flags & IO_DIRECT_DATA) == 0);
732 :
733 16800 : if ((uint64) blocknum + nblocks > (uint64) MaxBlockNumber + 1)
734 0 : return false;
735 :
736 33600 : while (nblocks > 0)
737 : {
738 : off_t seekpos;
739 : MdfdVec *v;
740 : int nblocks_this_segment;
741 :
742 16800 : v = _mdfd_getseg(reln, forknum, blocknum, false,
743 16800 : InRecovery ? EXTENSION_RETURN_NULL : EXTENSION_FAIL);
744 16800 : if (v == NULL)
745 0 : return false;
746 :
747 16800 : seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
748 :
749 : Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
750 :
751 16800 : nblocks_this_segment =
752 16800 : Min(nblocks,
753 : RELSEG_SIZE - (blocknum % ((BlockNumber) RELSEG_SIZE)));
754 :
755 16800 : (void) FilePrefetch(v->mdfd_vfd, seekpos, BLCKSZ * nblocks_this_segment,
756 : WAIT_EVENT_DATA_FILE_PREFETCH);
757 :
758 16800 : blocknum += nblocks_this_segment;
759 16800 : nblocks -= nblocks_this_segment;
760 : }
761 : #endif /* USE_PREFETCH */
762 :
763 16800 : return true;
764 : }
765 :
766 : /*
767 : * Convert an array of buffer address into an array of iovec objects, and
768 : * return the number that were required. 'iov' must have enough space for up
769 : * to 'nblocks' elements, but the number used may be less depending on
770 : * merging. In the case of a run of fully contiguous buffers, a single iovec
771 : * will be populated that can be handled as a plain non-vectored I/O.
772 : */
773 : static int
774 3437948 : buffers_to_iovec(struct iovec *iov, void **buffers, int nblocks)
775 : {
776 : struct iovec *iovp;
777 : int iovcnt;
778 :
779 : Assert(nblocks >= 1);
780 :
781 : /* If this build supports direct I/O, buffers must be I/O aligned. */
782 7197848 : for (int i = 0; i < nblocks; ++i)
783 : {
784 : if (PG_O_DIRECT != 0 && PG_IO_ALIGN_SIZE <= BLCKSZ)
785 : Assert((uintptr_t) buffers[i] ==
786 : TYPEALIGN(PG_IO_ALIGN_SIZE, buffers[i]));
787 : }
788 :
789 : /* Start the first iovec off with the first buffer. */
790 3437948 : iovp = &iov[0];
791 3437948 : iovp->iov_base = buffers[0];
792 3437948 : iovp->iov_len = BLCKSZ;
793 3437948 : iovcnt = 1;
794 :
795 : /* Try to merge the rest. */
796 3759900 : for (int i = 1; i < nblocks; ++i)
797 : {
798 321952 : void *buffer = buffers[i];
799 :
800 321952 : if (((char *) iovp->iov_base + iovp->iov_len) == buffer)
801 : {
802 : /* Contiguous with the last iovec. */
803 310476 : iovp->iov_len += BLCKSZ;
804 : }
805 : else
806 : {
807 : /* Need a new iovec. */
808 11476 : iovp++;
809 11476 : iovp->iov_base = buffer;
810 11476 : iovp->iov_len = BLCKSZ;
811 11476 : iovcnt++;
812 : }
813 : }
814 :
815 3437948 : return iovcnt;
816 : }
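A minimal usage sketch (editor's illustration; the function and buffer names below are invented, and the sketch assumes it is compiled into md.c, since buffers_to_iovec() is static): two buffers that are adjacent in memory collapse into a single iovec, while a non-adjacent buffer starts a new one.

static void
example_iovec_merge(void)
{
	/* Illustrative sketch only. */
	char	   *pool = palloc_aligned(4 * BLCKSZ, PG_IO_ALIGN_SIZE, 0);
	void	   *bufs[3];
	struct iovec iov[3];
	int			iovcnt;

	bufs[0] = pool;					/* block A */
	bufs[1] = pool + BLCKSZ;		/* block B, contiguous with A */
	bufs[2] = pool + 3 * BLCKSZ;	/* block C, gap before it */

	iovcnt = buffers_to_iovec(iov, bufs, 3);

	Assert(iovcnt == 2);			/* A and B merged, C separate */
	Assert(iov[0].iov_len == 2 * BLCKSZ);
	Assert(iov[1].iov_len == BLCKSZ);

	pfree(pool);
}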
817 :
818 : /*
819 : * mdmaxcombine() -- Return the maximum number of total blocks that can be
820 : * combined with an IO starting at blocknum.
821 : */
822 : uint32
823 65258 : mdmaxcombine(SMgrRelation reln, ForkNumber forknum,
824 : BlockNumber blocknum)
825 : {
826 : BlockNumber segoff;
827 :
828 65258 : segoff = blocknum % ((BlockNumber) RELSEG_SIZE);
829 :
830 65258 : return RELSEG_SIZE - segoff;
831 : }
832 :
833 : /*
834 : * mdreadv() -- Read the specified blocks from a relation.
835 : */
836 : void
837 1196 : mdreadv(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
838 : void **buffers, BlockNumber nblocks)
839 : {
840 2392 : while (nblocks > 0)
841 : {
842 : struct iovec iov[PG_IOV_MAX];
843 : int iovcnt;
844 : off_t seekpos;
845 : int nbytes;
846 : MdfdVec *v;
847 : BlockNumber nblocks_this_segment;
848 : size_t transferred_this_segment;
849 : size_t size_this_segment;
850 :
851 1196 : v = _mdfd_getseg(reln, forknum, blocknum, false,
852 : EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY);
853 :
854 1196 : seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
855 :
856 : Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
857 :
858 1196 : nblocks_this_segment =
859 1196 : Min(nblocks,
860 : RELSEG_SIZE - (blocknum % ((BlockNumber) RELSEG_SIZE)));
861 1196 : nblocks_this_segment = Min(nblocks_this_segment, lengthof(iov));
862 :
863 1196 : if (nblocks_this_segment != nblocks)
864 0 : elog(ERROR, "read crosses segment boundary");
865 :
866 1196 : iovcnt = buffers_to_iovec(iov, buffers, nblocks_this_segment);
867 1196 : size_this_segment = nblocks_this_segment * BLCKSZ;
868 1196 : transferred_this_segment = 0;
869 :
870 : /*
871 : * Inner loop to continue after a short read. We'll keep going until
872 : * we hit EOF rather than assuming that a short read means we hit the
873 : * end.
874 : */
875 : for (;;)
876 : {
877 : TRACE_POSTGRESQL_SMGR_MD_READ_START(forknum, blocknum,
878 : reln->smgr_rlocator.locator.spcOid,
879 : reln->smgr_rlocator.locator.dbOid,
880 : reln->smgr_rlocator.locator.relNumber,
881 : reln->smgr_rlocator.backend);
882 1196 : nbytes = FileReadV(v->mdfd_vfd, iov, iovcnt, seekpos,
883 : WAIT_EVENT_DATA_FILE_READ);
884 : TRACE_POSTGRESQL_SMGR_MD_READ_DONE(forknum, blocknum,
885 : reln->smgr_rlocator.locator.spcOid,
886 : reln->smgr_rlocator.locator.dbOid,
887 : reln->smgr_rlocator.locator.relNumber,
888 : reln->smgr_rlocator.backend,
889 : nbytes,
890 : size_this_segment - transferred_this_segment);
891 :
892 : #ifdef SIMULATE_SHORT_READ
893 : nbytes = Min(nbytes, 4096);
894 : #endif
895 :
896 1196 : if (nbytes < 0)
897 0 : ereport(ERROR,
898 : (errcode_for_file_access(),
899 : errmsg("could not read blocks %u..%u in file \"%s\": %m",
900 : blocknum,
901 : blocknum + nblocks_this_segment - 1,
902 : FilePathName(v->mdfd_vfd))));
903 :
904 1196 : if (nbytes == 0)
905 : {
906 : /*
907 : * We are at or past EOF, or we read a partial block at EOF.
908 : * Normally this is an error; upper levels should never try to
909 : * read a nonexistent block. However, if zero_damaged_pages
910 : * is ON or we are InRecovery, we should instead return zeroes
911 : * without complaining. This allows, for example, the case of
912 : * trying to update a block that was later truncated away.
913 : */
914 0 : if (zero_damaged_pages || InRecovery)
915 : {
916 0 : for (BlockNumber i = transferred_this_segment / BLCKSZ;
917 : i < nblocks_this_segment;
918 0 : ++i)
919 0 : memset(buffers[i], 0, BLCKSZ);
920 0 : break;
921 : }
922 : else
923 0 : ereport(ERROR,
924 : (errcode(ERRCODE_DATA_CORRUPTED),
925 : errmsg("could not read blocks %u..%u in file \"%s\": read only %zu of %zu bytes",
926 : blocknum,
927 : blocknum + nblocks_this_segment - 1,
928 : FilePathName(v->mdfd_vfd),
929 : transferred_this_segment,
930 : size_this_segment)));
931 : }
932 :
933 : /* One loop should usually be enough. */
934 1196 : transferred_this_segment += nbytes;
935 : Assert(transferred_this_segment <= size_this_segment);
936 1196 : if (transferred_this_segment == size_this_segment)
937 1196 : break;
938 :
939 : /* Adjust position and vectors after a short read. */
940 0 : seekpos += nbytes;
941 0 : iovcnt = compute_remaining_iovec(iov, iov, iovcnt, nbytes);
942 : }
943 :
944 1196 : nblocks -= nblocks_this_segment;
945 1196 : buffers += nblocks_this_segment;
946 1196 : blocknum += nblocks_this_segment;
947 : }
948 1196 : }
949 :
950 : /*
951 : * mdstartreadv() -- Asynchronous version of mdreadv().
952 : */
953 : void
954 2423508 : mdstartreadv(PgAioHandle *ioh,
955 : SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
956 : void **buffers, BlockNumber nblocks)
957 : {
958 : off_t seekpos;
959 : MdfdVec *v;
960 : BlockNumber nblocks_this_segment;
961 : struct iovec *iov;
962 : int iovcnt;
963 : int ret;
964 :
965 2423508 : v = _mdfd_getseg(reln, forknum, blocknum, false,
966 : EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY);
967 :
968 2423478 : seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
969 :
970 : Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
971 :
972 2423478 : nblocks_this_segment =
973 2423478 : Min(nblocks,
974 : RELSEG_SIZE - (blocknum % ((BlockNumber) RELSEG_SIZE)));
975 :
976 2423478 : if (nblocks_this_segment != nblocks)
977 0 : elog(ERROR, "read crossing segment boundary");
978 :
979 2423478 : iovcnt = pgaio_io_get_iovec(ioh, &iov);
980 :
981 : Assert(nblocks <= iovcnt);
982 :
983 2423478 : iovcnt = buffers_to_iovec(iov, buffers, nblocks_this_segment);
984 :
985 : Assert(iovcnt <= nblocks_this_segment);
986 :
987 2423478 : if (!(io_direct_flags & IO_DIRECT_DATA))
988 2420692 : pgaio_io_set_flag(ioh, PGAIO_HF_BUFFERED);
989 :
990 2423478 : pgaio_io_set_target_smgr(ioh,
991 : reln,
992 : forknum,
993 : blocknum,
994 : nblocks,
995 : false);
996 2423478 : pgaio_io_register_callbacks(ioh, PGAIO_HCB_MD_READV, 0);
997 :
998 2423478 : ret = FileStartReadV(ioh, v->mdfd_vfd, iovcnt, seekpos, WAIT_EVENT_DATA_FILE_READ);
999 2423478 : if (ret != 0)
1000 0 : ereport(ERROR,
1001 : (errcode_for_file_access(),
1002 : errmsg("could not start reading blocks %u..%u in file \"%s\": %m",
1003 : blocknum,
1004 : blocknum + nblocks_this_segment - 1,
1005 : FilePathName(v->mdfd_vfd))));
1006 :
1007 : /*
1008 : * The error checks corresponding to the post-read checks in mdreadv() are
1009 : * in md_readv_complete().
1010 : */
1011 2423478 : }
1012 :
1013 : /*
1014 : * mdwritev() -- Write the supplied blocks at the appropriate location.
1015 : *
1016 : * This is to be used only for updating already-existing blocks of a
1017 : * relation (ie, those before the current EOF). To extend a relation,
1018 : * use mdextend().
1019 : */
1020 : void
1021 1013274 : mdwritev(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
1022 : const void **buffers, BlockNumber nblocks, bool skipFsync)
1023 : {
1024 : /* This assert is too expensive to have on normally ... */
1025 : #ifdef CHECK_WRITE_VS_EXTEND
1026 : Assert((uint64) blocknum + (uint64) nblocks <= (uint64) mdnblocks(reln, forknum));
1027 : #endif
1028 :
1029 2026548 : while (nblocks > 0)
1030 : {
1031 : struct iovec iov[PG_IOV_MAX];
1032 : int iovcnt;
1033 : off_t seekpos;
1034 : int nbytes;
1035 : MdfdVec *v;
1036 : BlockNumber nblocks_this_segment;
1037 : size_t transferred_this_segment;
1038 : size_t size_this_segment;
1039 :
1040 1013274 : v = _mdfd_getseg(reln, forknum, blocknum, skipFsync,
1041 : EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY);
1042 :
1043 1013274 : seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
1044 :
1045 : Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
1046 :
1047 1013274 : nblocks_this_segment =
1048 1013274 : Min(nblocks,
1049 : RELSEG_SIZE - (blocknum % ((BlockNumber) RELSEG_SIZE)));
1050 1013274 : nblocks_this_segment = Min(nblocks_this_segment, lengthof(iov));
1051 :
1052 1013274 : if (nblocks_this_segment != nblocks)
1053 0 : elog(ERROR, "write crosses segment boundary");
1054 :
1055 1013274 : iovcnt = buffers_to_iovec(iov, (void **) buffers, nblocks_this_segment);
1056 1013274 : size_this_segment = nblocks_this_segment * BLCKSZ;
1057 1013274 : transferred_this_segment = 0;
1058 :
1059 : /*
1060 : * Inner loop to continue after a short write. If the reason is that
1061 : * we're out of disk space, a future attempt should get an ENOSPC
1062 : * error from the kernel.
1063 : */
1064 : for (;;)
1065 : {
1066 : TRACE_POSTGRESQL_SMGR_MD_WRITE_START(forknum, blocknum,
1067 : reln->smgr_rlocator.locator.spcOid,
1068 : reln->smgr_rlocator.locator.dbOid,
1069 : reln->smgr_rlocator.locator.relNumber,
1070 : reln->smgr_rlocator.backend);
1071 1013274 : nbytes = FileWriteV(v->mdfd_vfd, iov, iovcnt, seekpos,
1072 : WAIT_EVENT_DATA_FILE_WRITE);
1073 : TRACE_POSTGRESQL_SMGR_MD_WRITE_DONE(forknum, blocknum,
1074 : reln->smgr_rlocator.locator.spcOid,
1075 : reln->smgr_rlocator.locator.dbOid,
1076 : reln->smgr_rlocator.locator.relNumber,
1077 : reln->smgr_rlocator.backend,
1078 : nbytes,
1079 : size_this_segment - transferred_this_segment);
1080 :
1081 : #ifdef SIMULATE_SHORT_WRITE
1082 : nbytes = Min(nbytes, 4096);
1083 : #endif
1084 :
1085 1013274 : if (nbytes < 0)
1086 : {
1087 0 : bool enospc = errno == ENOSPC;
1088 :
1089 0 : ereport(ERROR,
1090 : (errcode_for_file_access(),
1091 : errmsg("could not write blocks %u..%u in file \"%s\": %m",
1092 : blocknum,
1093 : blocknum + nblocks_this_segment - 1,
1094 : FilePathName(v->mdfd_vfd)),
1095 : enospc ? errhint("Check free disk space.") : 0));
1096 : }
1097 :
1098 : /* One loop should usually be enough. */
1099 1013274 : transferred_this_segment += nbytes;
1100 : Assert(transferred_this_segment <= size_this_segment);
1101 1013274 : if (transferred_this_segment == size_this_segment)
1102 1013274 : break;
1103 :
1104 : /* Adjust position and iovecs after a short write. */
1105 0 : seekpos += nbytes;
1106 0 : iovcnt = compute_remaining_iovec(iov, iov, iovcnt, nbytes);
1107 : }
1108 :
1109 1013274 : if (!skipFsync && !SmgrIsTemp(reln))
1110 1007096 : register_dirty_segment(reln, forknum, v);
1111 :
1112 1013274 : nblocks -= nblocks_this_segment;
1113 1013274 : buffers += nblocks_this_segment;
1114 1013274 : blocknum += nblocks_this_segment;
1115 : }
1116 1013274 : }
1117 :
1118 :
1119 : /*
1120 : * mdwriteback() -- Tell the kernel to write pages back to storage.
1121 : *
1122 : * This accepts a range of blocks because flushing several pages at once is
1123 : * considerably more efficient than doing so individually.
1124 : */
1125 : void
1126 0 : mdwriteback(SMgrRelation reln, ForkNumber forknum,
1127 : BlockNumber blocknum, BlockNumber nblocks)
1128 : {
1129 : Assert((io_direct_flags & IO_DIRECT_DATA) == 0);
1130 :
1131 : /*
1132 : * Issue flush requests in as few requests as possible; have to split at
1133 : * segment boundaries though, since those are actually separate files.
1134 : */
1135 0 : while (nblocks > 0)
1136 : {
1137 0 : BlockNumber nflush = nblocks;
1138 : off_t seekpos;
1139 : MdfdVec *v;
1140 : int segnum_start,
1141 : segnum_end;
1142 :
1143 0 : v = _mdfd_getseg(reln, forknum, blocknum, true /* not used */ ,
1144 : EXTENSION_DONT_OPEN);
1145 :
1146 : /*
1147 : * We might be flushing buffers of already removed relations; that's
1148 : * ok, just ignore that case. If the segment file wasn't open already
1149 : * (ie from a recent mdwrite()), then we don't want to re-open it, to
1150 : * avoid a race with PROCSIGNAL_BARRIER_SMGRRELEASE that might leave
1151 : * us with a descriptor to a file that is about to be unlinked.
1152 : */
1153 0 : if (!v)
1154 0 : return;
1155 :
1156 : /* compute the segment number holding the first block to flush */
1157 0 : segnum_start = blocknum / RELSEG_SIZE;
1158 :
1159 : /* compute the segment holding the last block, to detect a segment crossing */
1160 0 : segnum_end = (blocknum + nblocks - 1) / RELSEG_SIZE;
1161 0 : if (segnum_start != segnum_end)
1162 0 : nflush = RELSEG_SIZE - (blocknum % ((BlockNumber) RELSEG_SIZE));
1163 :
1164 : Assert(nflush >= 1);
1165 : Assert(nflush <= nblocks);
1166 :
1167 0 : seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
1168 :
1169 0 : FileWriteback(v->mdfd_vfd, seekpos, (off_t) BLCKSZ * nflush, WAIT_EVENT_DATA_FILE_FLUSH);
1170 :
1171 0 : nblocks -= nflush;
1172 0 : blocknum += nflush;
1173 : }
1174 : }
1175 :
1176 : /*
1177 : * mdnblocks() -- Get the number of blocks stored in a relation.
1178 : *
1179 : * Important side effect: all active segments of the relation are opened
1180 : * and added to the md_seg_fds array. If this routine has not been
1181 : * called, then only segments up to the last one actually touched
1182 : * are present in the array.
1183 : */
1184 : BlockNumber
1185 4286916 : mdnblocks(SMgrRelation reln, ForkNumber forknum)
1186 : {
1187 : MdfdVec *v;
1188 : BlockNumber nblocks;
1189 : BlockNumber segno;
1190 :
1191 4286916 : mdopenfork(reln, forknum, EXTENSION_FAIL);
1192 :
1193 : /* mdopen has opened the first segment */
1194 : Assert(reln->md_num_open_segs[forknum] > 0);
1195 :
1196 : /*
1197 : * Start from the last open segments, to avoid redundant seeks. We have
1198 : * previously verified that these segments are exactly RELSEG_SIZE long,
1199 : * and it's useless to recheck that each time.
1200 : *
1201 : * NOTE: this assumption could only be wrong if another backend has
1202 : * truncated the relation. We rely on higher code levels to handle that
1203 : * scenario by closing and re-opening the md fd, which is handled via
1204 : * relcache flush. (Since the checkpointer doesn't participate in
1205 : * relcache flush, it could have segment entries for inactive segments;
1206 : * that's OK because the checkpointer never needs to compute relation
1207 : * size.)
1208 : */
1209 4286878 : segno = reln->md_num_open_segs[forknum] - 1;
1210 4286878 : v = &reln->md_seg_fds[forknum][segno];
1211 :
1212 : for (;;)
1213 : {
1214 4286878 : nblocks = _mdnblocks(reln, forknum, v);
1215 4286878 : if (nblocks > ((BlockNumber) RELSEG_SIZE))
1216 0 : elog(FATAL, "segment too big");
1217 4286878 : if (nblocks < ((BlockNumber) RELSEG_SIZE))
1218 4286878 : return (segno * ((BlockNumber) RELSEG_SIZE)) + nblocks;
1219 :
1220 : /*
1221 : * If segment is exactly RELSEG_SIZE, advance to next one.
1222 : */
1223 0 : segno++;
1224 :
1225 : /*
1226 : * We used to pass O_CREAT here, but that has the disadvantage that it
1227 : * might create a segment which has vanished through some operating
1228 : * system misadventure. In such a case, creating the segment here
1229 : * undermines _mdfd_getseg's attempts to notice and report an error
1230 : * upon access to a missing segment.
1231 : */
1232 0 : v = _mdfd_openseg(reln, forknum, segno, 0);
1233 0 : if (v == NULL)
1234 0 : return segno * ((BlockNumber) RELSEG_SIZE);
1235 : }
1236 : }
1237 :
1238 : /*
1239 : * mdtruncate() -- Truncate relation to specified number of blocks.
1240 : *
1241 : * Guaranteed not to allocate memory, so it can be used in a critical section.
1242 : * Caller must have called smgrnblocks() to obtain curnblk while holding a
1243 : * sufficient lock to prevent a change in relation size, and not used any smgr
1244 : * functions for this relation or handled interrupts in between. This makes
1245 : * sure we have opened all active segments, so that truncate loop will get
1246 : * them all!
1247 : */
1248 : void
1249 1682 : mdtruncate(SMgrRelation reln, ForkNumber forknum,
1250 : BlockNumber curnblk, BlockNumber nblocks)
1251 : {
1252 : BlockNumber priorblocks;
1253 : int curopensegs;
1254 :
1255 1682 : if (nblocks > curnblk)
1256 : {
1257 : /* Bogus request ... but no complaint if InRecovery */
1258 0 : if (InRecovery)
1259 0 : return;
1260 0 : ereport(ERROR,
1261 : (errmsg("could not truncate file \"%s\" to %u blocks: it's only %u blocks now",
1262 : relpath(reln->smgr_rlocator, forknum).str,
1263 : nblocks, curnblk)));
1264 : }
1265 1682 : if (nblocks == curnblk)
1266 660 : return; /* no work */
1267 :
1268 : /*
1269 : * Truncate segments, starting at the last one. Starting at the end makes
1270 : * managing the memory for the fd array easier, should there be errors.
1271 : */
1272 1022 : curopensegs = reln->md_num_open_segs[forknum];
1273 2044 : while (curopensegs > 0)
1274 : {
1275 : MdfdVec *v;
1276 :
1277 1022 : priorblocks = (curopensegs - 1) * RELSEG_SIZE;
1278 :
1279 1022 : v = &reln->md_seg_fds[forknum][curopensegs - 1];
1280 :
1281 1022 : if (priorblocks > nblocks)
1282 : {
1283 : /*
1284 : * This segment is no longer active. We truncate the file, but do
1285 : * not delete it, for reasons explained in the header comments.
1286 : */
1287 0 : if (FileTruncate(v->mdfd_vfd, 0, WAIT_EVENT_DATA_FILE_TRUNCATE) < 0)
1288 0 : ereport(ERROR,
1289 : (errcode_for_file_access(),
1290 : errmsg("could not truncate file \"%s\": %m",
1291 : FilePathName(v->mdfd_vfd))));
1292 :
1293 0 : if (!SmgrIsTemp(reln))
1294 0 : register_dirty_segment(reln, forknum, v);
1295 :
1296 : /* we never drop the 1st segment */
1297 : Assert(v != &reln->md_seg_fds[forknum][0]);
1298 :
1299 0 : FileClose(v->mdfd_vfd);
1300 0 : _fdvec_resize(reln, forknum, curopensegs - 1);
1301 : }
1302 1022 : else if (priorblocks + ((BlockNumber) RELSEG_SIZE) > nblocks)
1303 : {
1304 : /*
1305 : * This is the last segment we want to keep. Truncate the file to
1306 : * the right length. NOTE: if nblocks is exactly a multiple K of
1307 : * RELSEG_SIZE, we will truncate the K+1st segment to 0 length but
1308 : * keep it. This adheres to the invariant given in the header
1309 : * comments.
1310 : */
1311 1022 : BlockNumber lastsegblocks = nblocks - priorblocks;
1312 :
1313 1022 : if (FileTruncate(v->mdfd_vfd, (off_t) lastsegblocks * BLCKSZ, WAIT_EVENT_DATA_FILE_TRUNCATE) < 0)
1314 0 : ereport(ERROR,
1315 : (errcode_for_file_access(),
1316 : errmsg("could not truncate file \"%s\" to %u blocks: %m",
1317 : FilePathName(v->mdfd_vfd),
1318 : nblocks)));
1319 1022 : if (!SmgrIsTemp(reln))
1320 730 : register_dirty_segment(reln, forknum, v);
1321 : }
1322 : else
1323 : {
1324 : /*
1325 : * We still need this segment, so nothing to do for this and any
1326 : * earlier segment.
1327 : */
1328 0 : break;
1329 : }
1330 1022 : curopensegs--;
1331 : }
1332 : }
1333 :
1334 : /*
1335 : * mdregistersync() -- Mark whole relation as needing fsync
1336 : */
1337 : void
1338 47050 : mdregistersync(SMgrRelation reln, ForkNumber forknum)
1339 : {
1340 : int segno;
1341 : int min_inactive_seg;
1342 :
1343 : /*
1344 : * NOTE: mdnblocks makes sure we have opened all active segments, so that
1345 : * the loop below will get them all!
1346 : */
1347 47050 : mdnblocks(reln, forknum);
1348 :
1349 47050 : min_inactive_seg = segno = reln->md_num_open_segs[forknum];
1350 :
1351 : /*
1352 : * Temporarily open inactive segments, then close them after sync. There
1353 : * may be some inactive segments left opened after error, but that is
1354 : * harmless. We don't bother to clean them up and take a risk of further
1355 : * trouble. The next mdclose() will soon close them.
1356 : */
1357 47050 : while (_mdfd_openseg(reln, forknum, segno, 0) != NULL)
1358 0 : segno++;
1359 :
1360 94100 : while (segno > 0)
1361 : {
1362 47050 : MdfdVec *v = &reln->md_seg_fds[forknum][segno - 1];
1363 :
1364 47050 : register_dirty_segment(reln, forknum, v);
1365 :
1366 : /* Close inactive segments immediately */
1367 47050 : if (segno > min_inactive_seg)
1368 : {
1369 0 : FileClose(v->mdfd_vfd);
1370 0 : _fdvec_resize(reln, forknum, segno - 1);
1371 : }
1372 :
1373 47050 : segno--;
1374 : }
1375 47050 : }
1376 :
1377 : /*
1378 : * mdimmedsync() -- Immediately sync a relation to stable storage.
1379 : *
1380 : * Note that only writes already issued are synced; this routine knows
1381 : * nothing of dirty buffers that may exist inside the buffer manager. We
1382 : * sync active and inactive segments; smgrDoPendingSyncs() relies on this.
1383 : * Consider a relation skipping WAL. Suppose a checkpoint syncs blocks of
1384 : * some segment, then mdtruncate() renders that segment inactive. If we
1385 : * crash before the next checkpoint syncs the newly-inactive segment, that
1386 : * segment may survive recovery, reintroducing unwanted data into the table.
1387 : */
1388 : void
1389 24 : mdimmedsync(SMgrRelation reln, ForkNumber forknum)
1390 : {
1391 : int segno;
1392 : int min_inactive_seg;
1393 :
1394 : /*
1395 : * NOTE: mdnblocks makes sure we have opened all active segments, so that
1396 : * the loop below will get them all!
1397 : */
1398 24 : mdnblocks(reln, forknum);
1399 :
1400 24 : min_inactive_seg = segno = reln->md_num_open_segs[forknum];
1401 :
1402 : /*
1403 : * Temporarily open inactive segments, then close them after sync. There
1404 : * may be some inactive segments left opened after fsync() error, but that
1405 : * is harmless. We don't bother to clean them up and take a risk of
1406 : * further trouble. The next mdclose() will soon close them.
1407 : */
1408 24 : while (_mdfd_openseg(reln, forknum, segno, 0) != NULL)
1409 0 : segno++;
1410 :
1411 48 : while (segno > 0)
1412 : {
1413 24 : MdfdVec *v = &reln->md_seg_fds[forknum][segno - 1];
1414 :
1415 : /*
1416 : * fsyncs done through mdimmedsync() should be tracked in a separate
1417 : * IOContext than those done through mdsyncfiletag() to differentiate
1418 : * between unavoidable client backend fsyncs (e.g. those done during
1419 : * index build) and those which ideally would have been done by the
1420 : * checkpointer. Since other IO operations bypassing the buffer
1421 : * manager could also be tracked in such an IOContext, wait until
1422 : * these are also tracked to track immediate fsyncs.
1423 : */
1424 24 : if (FileSync(v->mdfd_vfd, WAIT_EVENT_DATA_FILE_IMMEDIATE_SYNC) < 0)
1425 0 : ereport(data_sync_elevel(ERROR),
1426 : (errcode_for_file_access(),
1427 : errmsg("could not fsync file \"%s\": %m",
1428 : FilePathName(v->mdfd_vfd))));
1429 :
1430 : /* Close inactive segments immediately */
1431 24 : if (segno > min_inactive_seg)
1432 : {
1433 0 : FileClose(v->mdfd_vfd);
1434 0 : _fdvec_resize(reln, forknum, segno - 1);
1435 : }
1436 :
1437 24 : segno--;
1438 : }
1439 24 : }
1440 :
1441 : int
1442 972766 : mdfd(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, uint32 *off)
1443 : {
1444 972766 : MdfdVec *v = mdopenfork(reln, forknum, EXTENSION_FAIL);
1445 :
1446 972766 : v = _mdfd_getseg(reln, forknum, blocknum, false,
1447 : EXTENSION_FAIL);
1448 :
1449 972766 : *off = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
1450 :
1451 : Assert(*off < (off_t) BLCKSZ * RELSEG_SIZE);
1452 :
1453 972766 : return FileGetRawDesc(v->mdfd_vfd);
1454 : }
1455 :
1456 : /*
1457 : * register_dirty_segment() -- Mark a relation segment as needing fsync
1458 : *
1459 : * If there is a local pending-ops table, just make an entry in it for
1460 : * ProcessSyncRequests to process later. Otherwise, try to pass off the
1461 : * fsync request to the checkpointer process. If that fails, just do the
1462 : * fsync locally before returning (we hope this will not happen often
1463 : * enough to be a performance problem).
1464 : */
1465 : static void
1466 1739652 : register_dirty_segment(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
1467 : {
1468 : FileTag tag;
1469 :
1470 1739652 : INIT_MD_FILETAG(tag, reln->smgr_rlocator.locator, forknum, seg->mdfd_segno);
1471 :
1472 : /* Temp relations should never be fsync'd */
1473 : Assert(!SmgrIsTemp(reln));
1474 :
1475 1739652 : if (!RegisterSyncRequest(&tag, SYNC_REQUEST, false /* retryOnError */ ))
1476 : {
1477 : instr_time io_start;
1478 :
1479 1228 : ereport(DEBUG1,
1480 : (errmsg_internal("could not forward fsync request because request queue is full")));
1481 :
1482 1228 : io_start = pgstat_prepare_io_time(track_io_timing);
1483 :
1484 1228 : if (FileSync(seg->mdfd_vfd, WAIT_EVENT_DATA_FILE_SYNC) < 0)
1485 0 : ereport(data_sync_elevel(ERROR),
1486 : (errcode_for_file_access(),
1487 : errmsg("could not fsync file \"%s\": %m",
1488 : FilePathName(seg->mdfd_vfd))));
1489 :
1490 : /*
1491 : * We have no way of knowing if the current IOContext is
1492 : * IOCONTEXT_NORMAL or IOCONTEXT_[BULKREAD, BULKWRITE, VACUUM] at this
1493 : * point, so count the fsync as being in the IOCONTEXT_NORMAL
1494 : * IOContext. This is probably okay, because the number of backend
1495 : * fsyncs doesn't say anything about the efficacy of the
1496 : * BufferAccessStrategy. And counting both fsyncs done in
1497 : * IOCONTEXT_NORMAL and IOCONTEXT_[BULKREAD, BULKWRITE, VACUUM] under
1498 : * IOCONTEXT_NORMAL is likely clearer when investigating the number of
1499 : * backend fsyncs.
1500 : */
1501 1228 : pgstat_count_io_op_time(IOOBJECT_RELATION, IOCONTEXT_NORMAL,
1502 : IOOP_FSYNC, io_start, 1, 0);
1503 : }
1504 1739652 : }
1505 :
1506 : /*
1507 : * register_unlink_segment() -- Schedule a file to be deleted after next checkpoint
1508 : */
1509 : static void
1510 68474 : register_unlink_segment(RelFileLocatorBackend rlocator, ForkNumber forknum,
1511 : BlockNumber segno)
1512 : {
1513 : FileTag tag;
1514 :
1515 68474 : INIT_MD_FILETAG(tag, rlocator.locator, forknum, segno);
1516 :
1517 : /* Should never be used with temp relations */
1518 : Assert(!RelFileLocatorBackendIsTemp(rlocator));
1519 :
1520 68474 : RegisterSyncRequest(&tag, SYNC_UNLINK_REQUEST, true /* retryOnError */ );
1521 68474 : }
1522 :
1523 : /*
1524 : * register_forget_request() -- forget any fsyncs for a relation fork's segment
1525 : */
1526 : static void
1527 265390 : register_forget_request(RelFileLocatorBackend rlocator, ForkNumber forknum,
1528 : BlockNumber segno)
1529 : {
1530 : FileTag tag;
1531 :
1532 265390 : INIT_MD_FILETAG(tag, rlocator.locator, forknum, segno);
1533 :
1534 265390 : RegisterSyncRequest(&tag, SYNC_FORGET_REQUEST, true /* retryOnError */ );
1535 265390 : }
1536 :
1537 : /*
1538 : * ForgetDatabaseSyncRequests -- forget any fsyncs and unlinks for a DB
1539 : */
1540 : void
1541 114 : ForgetDatabaseSyncRequests(Oid dbid)
1542 : {
1543 : FileTag tag;
1544 : RelFileLocator rlocator;
1545 :
1546 114 : rlocator.dbOid = dbid;
1547 114 : rlocator.spcOid = 0;
1548 114 : rlocator.relNumber = 0;
1549 :
1550 114 : INIT_MD_FILETAG(tag, rlocator, InvalidForkNumber, InvalidBlockNumber);
1551 :
1552 114 : RegisterSyncRequest(&tag, SYNC_FILTER_REQUEST, true /* retryOnError */ );
1553 114 : }
1554 :
1555 : /*
1556 : * DropRelationFiles -- drop files of all given relations
1557 : */
1558 : void
1559 5160 : DropRelationFiles(RelFileLocator *delrels, int ndelrels, bool isRedo)
1560 : {
1561 : SMgrRelation *srels;
1562 : int i;
1563 :
1564 5160 : srels = palloc(sizeof(SMgrRelation) * ndelrels);
1565 20104 : for (i = 0; i < ndelrels; i++)
1566 : {
1567 14944 : SMgrRelation srel = smgropen(delrels[i], INVALID_PROC_NUMBER);
1568 :
1569 14944 : if (isRedo)
1570 : {
1571 : ForkNumber fork;
1572 :
1573 74480 : for (fork = 0; fork <= MAX_FORKNUM; fork++)
1574 59584 : XLogDropRelation(delrels[i], fork);
1575 : }
1576 14944 : srels[i] = srel;
1577 : }
1578 :
1579 5160 : smgrdounlinkall(srels, ndelrels, isRedo);
1580 :
1581 20104 : for (i = 0; i < ndelrels; i++)
1582 14944 : smgrclose(srels[i]);
1583 5160 : pfree(srels);
1584 5160 : }
1585 :
1586 :
1587 : /*
1588 : * _fdvec_resize() -- Resize the fork's open segments array
1589 : */
1590 : static void
1591 2858510 : _fdvec_resize(SMgrRelation reln,
1592 : ForkNumber forknum,
1593 : int nseg)
1594 : {
1595 2858510 : if (nseg == 0)
1596 : {
1597 1029494 : if (reln->md_num_open_segs[forknum] > 0)
1598 : {
1599 1029494 : pfree(reln->md_seg_fds[forknum]);
1600 1029494 : reln->md_seg_fds[forknum] = NULL;
1601 : }
1602 : }
1603 1829016 : else if (reln->md_num_open_segs[forknum] == 0)
1604 : {
1605 1829016 : reln->md_seg_fds[forknum] =
1606 1829016 : MemoryContextAlloc(MdCxt, sizeof(MdfdVec) * nseg);
1607 : }
1608 0 : else if (nseg > reln->md_num_open_segs[forknum])
1609 : {
1610 : /*
1611 : * It doesn't seem worthwhile complicating the code to amortize
1612 : * repalloc() calls. Those are far faster than PathNameOpenFile() or
1613 : * FileClose(), and the memory context internally will sometimes avoid
1614 : * doing an actual reallocation.
1615 : */
1616 0 : reln->md_seg_fds[forknum] =
1617 0 : repalloc(reln->md_seg_fds[forknum],
1618 : sizeof(MdfdVec) * nseg);
1619 : }
1620 : else
1621 : {
1622 : /*
1623 : * We don't reallocate a smaller array, because we want mdtruncate()
1624 : * to be able to promise that it won't allocate memory, so that it is
1625 : * allowed in a critical section. This means that a bit of space in
1626 : * the array is now wasted, until the next time we add a segment and
1627 : * reallocate.
1628 : */
1629 : }
1630 :
1631 2858510 : reln->md_num_open_segs[forknum] = nseg;
1632 2858510 : }
1633 :
1634 : /*
1635 : * Return the filename for the specified segment of the relation. The
1636 : * path is returned by value as a fixed-size MdPathStr.
1637 : */
1638 : static MdPathStr
1639 47098 : _mdfd_segpath(SMgrRelation reln, ForkNumber forknum, BlockNumber segno)
1640 : {
1641 : RelPathStr path;
1642 : MdPathStr fullpath;
1643 :
1644 47098 : path = relpath(reln->smgr_rlocator, forknum);
1645 :
1646 47098 : if (segno > 0)
1647 47098 : sprintf(fullpath.str, "%s.%u", path.str, segno);
1648 : else
1649 0 : strcpy(fullpath.str, path.str);
1650 :
1651 47098 : return fullpath;
1652 : }
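/*
 * For illustration, assuming an ordinary (non-temporary) relation stored as
 * "base/16384/16385" (database OID 16384, relfilenumber 16385):
 *
 *     segno = 0  =>  "base/16384/16385"
 *     segno = 2  =>  "base/16384/16385.2"
 *
 * i.e. only segments beyond the first carry the ".<segno>" suffix.
 */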
1653 :
1654 : /*
1655 : * Open the specified segment of the relation,
1656 : * and make an MdfdVec entry for it. Returns NULL on failure.
1657 : */
1658 : static MdfdVec *
1659 47074 : _mdfd_openseg(SMgrRelation reln, ForkNumber forknum, BlockNumber segno,
1660 : int oflags)
1661 : {
1662 : MdfdVec *v;
1663 : File fd;
1664 : MdPathStr fullpath;
1665 :
1666 47074 : fullpath = _mdfd_segpath(reln, forknum, segno);
1667 :
1668 : /* open the file */
1669 47074 : fd = PathNameOpenFile(fullpath.str, _mdfd_open_flags() | oflags);
1670 :
1671 47074 : if (fd < 0)
1672 47074 : return NULL;
1673 :
1674 : /*
1675 : * Segments are always opened in order from lowest to highest, so we must
1676 : * be adding a new one at the end.
1677 : */
1678 : Assert(segno == reln->md_num_open_segs[forknum]);
1679 :
1680 0 : _fdvec_resize(reln, forknum, segno + 1);
1681 :
1682 : /* fill the entry */
1683 0 : v = &reln->md_seg_fds[forknum][segno];
1684 0 : v->mdfd_vfd = fd;
1685 0 : v->mdfd_segno = segno;
1686 :
1687 : Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE));
1688 :
1689 : /* all done */
1690 0 : return v;
1691 : }
1692 :
1693 : /*
1694 : * _mdfd_getseg() -- Find the segment of the relation holding the
1695 : * specified block.
1696 : *
1697 : * If the segment doesn't exist, we ereport, return NULL, or create the
1698 : * segment, according to "behavior". Note: skipFsync is only used in the
1699 : * EXTENSION_CREATE case.
1700 : */
1701 : static MdfdVec *
1702 5062156 : _mdfd_getseg(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
1703 : bool skipFsync, int behavior)
1704 : {
1705 : MdfdVec *v;
1706 : BlockNumber targetseg;
1707 : BlockNumber nextsegno;
1708 :
1709 : /* some way to handle non-existent segments needs to be specified */
1710 : Assert(behavior &
1711 : (EXTENSION_FAIL | EXTENSION_CREATE | EXTENSION_RETURN_NULL |
1712 : EXTENSION_DONT_OPEN));
1713 :
1714 5062156 : targetseg = blkno / ((BlockNumber) RELSEG_SIZE);
1715 :
1716 : /* if an existing and opened segment, we're done */
1717 5062156 : if (targetseg < reln->md_num_open_segs[forknum])
1718 : {
1719 4656460 : v = &reln->md_seg_fds[forknum][targetseg];
1720 4656460 : return v;
1721 : }
1722 :
1723 : /* The caller only wants the segment if we already had it open. */
1724 405696 : if (behavior & EXTENSION_DONT_OPEN)
1725 0 : return NULL;
1726 :
1727 : /*
1728 : * The target segment is not yet open. Iterate over all the segments
1729 : * between the last opened and the target segment. This way missing
1730 : * segments either raise an error, or get created (according to
1731 : * 'behavior'). Start with either the last opened, or the first segment if
1732 : * none was opened before.
1733 : */
1734 405696 : if (reln->md_num_open_segs[forknum] > 0)
1735 24 : v = &reln->md_seg_fds[forknum][reln->md_num_open_segs[forknum] - 1];
1736 : else
1737 : {
1738 405672 : v = mdopenfork(reln, forknum, behavior);
1739 405666 : if (!v)
1740 0 : return NULL; /* if behavior & EXTENSION_RETURN_NULL */
1741 : }
1742 :
1743 405690 : for (nextsegno = reln->md_num_open_segs[forknum];
1744 0 : nextsegno <= targetseg; nextsegno++)
1745 : {
1746 24 : BlockNumber nblocks = _mdnblocks(reln, forknum, v);
1747 24 : int flags = 0;
1748 :
1749 : Assert(nextsegno == v->mdfd_segno + 1);
1750 :
1751 24 : if (nblocks > ((BlockNumber) RELSEG_SIZE))
1752 0 : elog(FATAL, "segment too big");
1753 :
1754 24 : if ((behavior & EXTENSION_CREATE) ||
1755 24 : (InRecovery && (behavior & EXTENSION_CREATE_RECOVERY)))
1756 : {
1757 : /*
1758 : * Normally we will create new segments only if authorized by the
1759 : * caller (i.e., we are doing mdextend()). But when doing WAL
1760 : * recovery, create segments anyway; this allows cases such as
1761 : * replaying WAL data that has a write into a high-numbered
1762 : * segment of a relation that was later deleted. We want to go
1763 : * ahead and create the segments so we can finish out the replay.
1764 : *
1765 : * We have to maintain the invariant that segments before the last
1766 : * active segment are of size RELSEG_SIZE; therefore, if
1767 : * extending, pad them out with zeroes if needed. (This only
1768 : * matters if in recovery, or if the caller is extending the
1769 : * relation discontiguously, but that can happen in hash indexes.)
1770 : */
1771 0 : if (nblocks < ((BlockNumber) RELSEG_SIZE))
1772 : {
1773 0 : char *zerobuf = palloc_aligned(BLCKSZ, PG_IO_ALIGN_SIZE,
1774 : MCXT_ALLOC_ZERO);
1775 :
1776 0 : mdextend(reln, forknum,
1777 0 : nextsegno * ((BlockNumber) RELSEG_SIZE) - 1,
1778 : zerobuf, skipFsync);
1779 0 : pfree(zerobuf);
1780 : }
1781 0 : flags = O_CREAT;
1782 : }
1783 24 : else if (nblocks < ((BlockNumber) RELSEG_SIZE))
1784 : {
1785 : /*
1786 : * When not extending, only open the next segment if the current
1787 : * one is exactly RELSEG_SIZE. If not (this branch), either
1788 : * return NULL or fail.
1789 : */
1790 24 : if (behavior & EXTENSION_RETURN_NULL)
1791 : {
1792 : /*
1793 :              * Some callers use errno to distinguish why _mdfd_getseg()
1794 :              * returned NULL. As there's no failing
1795 : * syscall involved in this case, explicitly set errno to
1796 : * ENOENT, as that seems the closest interpretation.
1797 : */
1798 0 : errno = ENOENT;
1799 0 : return NULL;
1800 : }
1801 :
1802 24 : ereport(ERROR,
1803 : (errcode_for_file_access(),
1804 : errmsg("could not open file \"%s\" (target block %u): previous segment is only %u blocks",
1805 : _mdfd_segpath(reln, forknum, nextsegno).str,
1806 : blkno, nblocks)));
1807 : }
1808 :
1809 0 : v = _mdfd_openseg(reln, forknum, nextsegno, flags);
1810 :
1811 0 : if (v == NULL)
1812 : {
1813 0 : if ((behavior & EXTENSION_RETURN_NULL) &&
1814 0 : FILE_POSSIBLY_DELETED(errno))
1815 0 : return NULL;
1816 0 : ereport(ERROR,
1817 : (errcode_for_file_access(),
1818 : errmsg("could not open file \"%s\" (target block %u): %m",
1819 : _mdfd_segpath(reln, forknum, nextsegno).str,
1820 : blkno)));
1821 : }
1822 : }
1823 :
1824 405666 : return v;
1825 : }
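/*
 * Worked example, assuming the default build settings BLCKSZ = 8192 and
 * RELSEG_SIZE = 131072 (1 GB segments): a request for block 300000 computes
 *
 *     targetseg = 300000 / 131072 = 2
 *
 * so the block lives in segment file "<relpath>.2"; the offset within that
 * segment (300000 - 2 * 131072 = 37856 blocks) is computed separately by
 * callers such as mdreadv() and mdwritev().
 */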
1826 :
1827 : /*
1828 : * Get number of blocks present in a single disk file
1829 : */
1830 : static BlockNumber
1831 4286902 : _mdnblocks(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
1832 : {
1833 : off_t len;
1834 :
1835 4286902 : len = FileSize(seg->mdfd_vfd);
1836 4286902 : if (len < 0)
1837 0 : ereport(ERROR,
1838 : (errcode_for_file_access(),
1839 : errmsg("could not seek to end of file \"%s\": %m",
1840 : FilePathName(seg->mdfd_vfd))));
1841 : /* note that this calculation will ignore any partial block at EOF */
1842 4286902 : return (BlockNumber) (len / BLCKSZ);
1843 : }
1844 :
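/*
 * For example, with BLCKSZ = 8192 a segment file of 81920 bytes reports 10
 * blocks.  If the file were 81925 bytes (say, a torn write left a partial
 * block at EOF), integer division still yields 10 and the trailing fragment
 * is ignored, as noted above.
 */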
1845 : /*
1846 : * Sync a file to disk, given a file tag. Write the path into an output
1847 : * buffer so the caller can use it in error messages.
1848 : *
1849 : * Return 0 on success, -1 on failure, with errno set.
1850 : */
1851 : int
1852 0 : mdsyncfiletag(const FileTag *ftag, char *path)
1853 : {
1854 0 : SMgrRelation reln = smgropen(ftag->rlocator, INVALID_PROC_NUMBER);
1855 : File file;
1856 : instr_time io_start;
1857 : bool need_to_close;
1858 : int result,
1859 : save_errno;
1860 :
1861 : /* See if we already have the file open, or need to open it. */
1862 0 : if (ftag->segno < reln->md_num_open_segs[ftag->forknum])
1863 : {
1864 0 : file = reln->md_seg_fds[ftag->forknum][ftag->segno].mdfd_vfd;
1865 0 : strlcpy(path, FilePathName(file), MAXPGPATH);
1866 0 : need_to_close = false;
1867 : }
1868 : else
1869 : {
1870 : MdPathStr p;
1871 :
1872 0 : p = _mdfd_segpath(reln, ftag->forknum, ftag->segno);
1873 0 : strlcpy(path, p.str, MD_PATH_STR_MAXLEN);
1874 :
1875 0 : file = PathNameOpenFile(path, _mdfd_open_flags());
1876 0 : if (file < 0)
1877 0 : return -1;
1878 0 : need_to_close = true;
1879 : }
1880 :
1881 0 : io_start = pgstat_prepare_io_time(track_io_timing);
1882 :
1883 : /* Sync the file. */
1884 0 : result = FileSync(file, WAIT_EVENT_DATA_FILE_SYNC);
1885 0 : save_errno = errno;
1886 :
1887 0 : if (need_to_close)
1888 0 : FileClose(file);
1889 :
1890 0 : pgstat_count_io_op_time(IOOBJECT_RELATION, IOCONTEXT_NORMAL,
1891 : IOOP_FSYNC, io_start, 1, 0);
1892 :
1893 0 : errno = save_errno;
1894 0 : return result;
1895 : }
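/*
 * Note the save_errno dance above: FileClose() may clobber errno, so the
 * errno produced by FileSync() is captured first and restored just before
 * returning, preserving the "-1 with errno set" contract for the caller.
 */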
1896 :
1897 : /*
1898 : * Unlink a file, given a file tag. Write the path into an output
1899 : * buffer so the caller can use it in error messages.
1900 : *
1901 : * Return 0 on success, -1 on failure, with errno set.
1902 : */
1903 : int
1904 64560 : mdunlinkfiletag(const FileTag *ftag, char *path)
1905 : {
1906 : RelPathStr p;
1907 :
1908 : /* Compute the path. */
1909 64560 : p = relpathperm(ftag->rlocator, MAIN_FORKNUM);
1910 64560 : strlcpy(path, p.str, MAXPGPATH);
1911 :
1912 : /* Try to unlink the file. */
1913 64560 : return unlink(path);
1914 : }
1915 :
1916 : /*
1917 : * Check if a given candidate request matches a given tag, when processing
1918 : * a SYNC_FILTER_REQUEST request. This will be called for all pending
1919 : * requests to find out whether to forget them.
1920 : */
1921 : bool
1922 12240 : mdfiletagmatches(const FileTag *ftag, const FileTag *candidate)
1923 : {
1924 : /*
1925 : * For now we only use filter requests as a way to drop all scheduled
1926 : * callbacks relating to a given database, when dropping the database.
1927 : * We'll return true for all candidates that have the same database OID as
1928 : * the ftag from the SYNC_FILTER_REQUEST request, so they're forgotten.
1929 : */
1930 12240 : return ftag->rlocator.dbOid == candidate->rlocator.dbOid;
1931 : }
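/*
 * For illustration: dropping a database with OID 16384 calls
 * ForgetDatabaseSyncRequests(16384) above, which enqueues a
 * SYNC_FILTER_REQUEST whose tag carries only dbOid = 16384.  When the sync
 * machinery absorbs that request, it calls this function for every pending
 * fsync/unlink request; any candidate whose tag has dbOid = 16384 returns
 * true here and is dropped.
 */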
1932 :
1933 : /*
1934 : * AIO completion callback for mdstartreadv().
1935 : */
1936 : static PgAioResult
1937 2213852 : md_readv_complete(PgAioHandle *ioh, PgAioResult prior_result, uint8 cb_data)
1938 : {
1939 2213852 : PgAioTargetData *td = pgaio_io_get_target_data(ioh);
1940 2213852 : PgAioResult result = prior_result;
1941 :
1942 2213852 : if (prior_result.result < 0)
1943 : {
1944 0 : result.status = PGAIO_RS_ERROR;
1945 0 : result.id = PGAIO_HCB_MD_READV;
1946 : /* For "hard" errors, track the error number in error_data */
1947 0 : result.error_data = -prior_result.result;
1948 0 : result.result = 0;
1949 :
1950 : /*
1951 : * Immediately log a message about the IO error, but only to the
1952 : * server log. The reason to do so immediately is that the originator
1953 : * might not process the query result immediately (because it is busy
1954 : * doing another part of query processing) or at all (e.g. if it was
1955 : * cancelled or errored out due to another IO also failing). The
1956 : * issuer of the IO will emit an ERROR when processing the IO's
1957 :          * results.
1958 : */
1959 0 : pgaio_result_report(result, td, LOG_SERVER_ONLY);
1960 :
1961 0 : return result;
1962 : }
1963 :
1964 : /*
1965 : * As explained above smgrstartreadv(), the smgr API operates on the level
1966 : * of blocks, rather than bytes. Convert.
1967 : */
1968 2213852 : result.result /= BLCKSZ;
1969 :
1970 : Assert(result.result <= td->smgr.nblocks);
1971 :
1972 2213852 : if (result.result == 0)
1973 : {
1974 : /* consider 0 blocks read a failure */
1975 0 : result.status = PGAIO_RS_ERROR;
1976 0 : result.id = PGAIO_HCB_MD_READV;
1977 0 : result.error_data = 0;
1978 :
1979 : /* see comment above the "hard error" case */
1980 0 : pgaio_result_report(result, td, LOG_SERVER_ONLY);
1981 :
1982 0 : return result;
1983 : }
1984 :
1985 2213852 : if (result.status != PGAIO_RS_ERROR &&
1986 2213852 : result.result < td->smgr.nblocks)
1987 : {
1988 :          /* partial reads should be retried at a higher level */
1989 0 : result.status = PGAIO_RS_PARTIAL;
1990 0 : result.id = PGAIO_HCB_MD_READV;
1991 : }
1992 :
1993 2213852 : return result;
1994 : }
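/*
 * Worked example, assuming BLCKSZ = 8192: an IO issued for
 * td->smgr.nblocks = 4 blocks whose prior_result.result comes back as
 * 16384 bytes is converted to result.result = 16384 / 8192 = 2 blocks.
 * Since 2 < 4, the IO is flagged PGAIO_RS_PARTIAL and the remainder is
 * retried at a higher level.  A result of 0 bytes is treated as an error,
 * and a negative result carries the errno (negated) in error_data.
 */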
1995 :
1996 : /*
1997 : * AIO error reporting callback for mdstartreadv().
1998 : *
1999 : * Errors are encoded as follows:
2000 : * - PgAioResult.error_data != 0 encodes IO that failed with that errno
2001 : * - PgAioResult.error_data == 0 encodes IO that didn't read all data
2002 : */
2003 : static void
2004 0 : md_readv_report(PgAioResult result, const PgAioTargetData *td, int elevel)
2005 : {
2006 : RelPathStr path;
2007 :
2008 0 : path = relpathbackend(td->smgr.rlocator,
2009 : td->smgr.is_temp ? MyProcNumber : INVALID_PROC_NUMBER,
2010 : td->smgr.forkNum);
2011 :
2012 0 : if (result.error_data != 0)
2013 : {
2014 : /* for errcode_for_file_access() and %m */
2015 0 : errno = result.error_data;
2016 :
2017 0 : ereport(elevel,
2018 : errcode_for_file_access(),
2019 : errmsg("could not read blocks %u..%u in file \"%s\": %m",
2020 : td->smgr.blockNum,
2021 : td->smgr.blockNum + td->smgr.nblocks - 1,
2022 : path.str));
2023 : }
2024 : else
2025 : {
2026 : /*
2027 : * NB: This will typically only be output in debug messages, while
2028 : * retrying a partial IO.
2029 : */
2030 0 : ereport(elevel,
2031 : errcode(ERRCODE_DATA_CORRUPTED),
2032 : errmsg("could not read blocks %u..%u in file \"%s\": read only %zu of %zu bytes",
2033 : td->smgr.blockNum,
2034 : td->smgr.blockNum + td->smgr.nblocks - 1,
2035 : path.str,
2036 : result.result * (size_t) BLCKSZ,
2037 : td->smgr.nblocks * (size_t) BLCKSZ));
2038 : }
2039 0 : }
|