1 : /*-------------------------------------------------------------------------
2 : *
3 : * method_worker.c
4 : * AIO - perform AIO using worker processes
5 : *
6 : * IO workers consume IOs from a shared memory submission queue, run
7 : * traditional synchronous system calls, and perform the shared completion
8 : * handling immediately. Client code submits most requests by pushing IOs
9 : * into the submission queue, and waits (if necessary) using condition
10 : * variables. Some IOs cannot be performed in another process due to lack of
11 : * infrastructure for reopening the file, and must be processed synchronously by
12 : * the client code when submitted.
13 : *
14 : * So that the submitter can make just one system call when submitting a batch
15 : * of IOs, wakeups "fan out"; each woken IO worker can wake two more. XXX This
16 : * could be improved by using futexes instead of latches to wake N waiters.
17 : *
18 : * This method of AIO is available in all builds on all operating systems, and
19 : * is the default.
20 : *
21 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
22 : * Portions Copyright (c) 1994, Regents of the University of California
23 : *
24 : * IDENTIFICATION
25 : * src/backend/storage/aio/method_worker.c
26 : *
27 : *-------------------------------------------------------------------------
28 : */
29 :
30 : #include "postgres.h"
31 :
32 : #include "libpq/pqsignal.h"
33 : #include "miscadmin.h"
34 : #include "port/pg_bitutils.h"
35 : #include "postmaster/auxprocess.h"
36 : #include "postmaster/interrupt.h"
37 : #include "storage/aio.h"
38 : #include "storage/aio_internal.h"
39 : #include "storage/aio_subsys.h"
40 : #include "storage/io_worker.h"
41 : #include "storage/ipc.h"
42 : #include "storage/latch.h"
43 : #include "storage/proc.h"
44 : #include "tcop/tcopprot.h"
45 : #include "utils/injection_point.h"
46 : #include "utils/memdebug.h"
47 : #include "utils/ps_status.h"
48 : #include "utils/wait_event.h"
49 :
50 :
51 : /* How many workers should each worker wake up if needed? */
52 : #define IO_WORKER_WAKEUP_FANOUT 2
53 :
54 :
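/*
 * The submission queue is a ring buffer of IO handle indexes, protected by
 * AioWorkerSubmissionQueueLock.  "size" is a power of two so that head/tail
 * arithmetic can use masking; the queue is full when advancing "head" would
 * make it equal to "tail".
 */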
55 : typedef struct PgAioWorkerSubmissionQueue
56 : {
57 : uint32 size;
58 : uint32 head;
59 : uint32 tail;
60 : int sqes[FLEXIBLE_ARRAY_MEMBER];
61 : } PgAioWorkerSubmissionQueue;
62 :
63 : typedef struct PgAioWorkerSlot
64 : {
65 : Latch *latch;
66 : bool in_use;
67 : } PgAioWorkerSlot;
68 :
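/*
 * Registration state for all workers.  idle_worker_mask contains one bit per
 * worker slot, set while the corresponding worker is idle and can be woken
 * (so the uint64 mask limits MAX_IO_WORKERS to 64).
 */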
69 : typedef struct PgAioWorkerControl
70 : {
71 : uint64 idle_worker_mask;
72 : PgAioWorkerSlot workers[FLEXIBLE_ARRAY_MEMBER];
73 : } PgAioWorkerControl;
74 :
75 :
76 : static size_t pgaio_worker_shmem_size(void);
77 : static void pgaio_worker_shmem_init(bool first_time);
78 :
79 : static bool pgaio_worker_needs_synchronous_execution(PgAioHandle *ioh);
80 : static int pgaio_worker_submit(uint16 num_staged_ios, PgAioHandle **staged_ios);
81 :
82 :
83 : const IoMethodOps pgaio_worker_ops = {
84 : .shmem_size = pgaio_worker_shmem_size,
85 : .shmem_init = pgaio_worker_shmem_init,
86 :
87 : .needs_synchronous_execution = pgaio_worker_needs_synchronous_execution,
88 : .submit = pgaio_worker_submit,
89 : };
90 :
91 :
92 : /* GUCs */
93 : int io_workers = 3;
94 :
95 :
96 : static int io_worker_queue_size = 64;
97 : static int MyIoWorkerId;
98 : static PgAioWorkerSubmissionQueue *io_worker_submission_queue;
99 : static PgAioWorkerControl *io_worker_control;
100 :
101 :
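/*
 * Shared memory size of the submission queue.  The effective queue size
 * (io_worker_queue_size rounded up to the next power of two) is returned
 * via *queue_size.
 */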
102 : static size_t
103 6510 : pgaio_worker_queue_shmem_size(int *queue_size)
104 : {
105 : /* Round size up to next power of two so we can make a mask. */
106 6510 : *queue_size = pg_nextpower2_32(io_worker_queue_size);
107 :
108 13020 : return offsetof(PgAioWorkerSubmissionQueue, sqes) +
109 6510 : sizeof(int) * *queue_size;
110 : }
111 :
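/*
 * Shared memory size of the worker control structure, sized for
 * MAX_IO_WORKERS worker slots.
 */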
112 : static size_t
113 6510 : pgaio_worker_control_shmem_size(void)
114 : {
115 6510 : return offsetof(PgAioWorkerControl, workers) +
116 : sizeof(PgAioWorkerSlot) * MAX_IO_WORKERS;
117 : }
118 :
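/* IoMethodOps->shmem_size callback: total shared memory needed. */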
119 : static size_t
120 4240 : pgaio_worker_shmem_size(void)
121 : {
122 : size_t sz;
123 : int queue_size;
124 :
125 4240 : sz = pgaio_worker_queue_shmem_size(&queue_size);
126 4240 : sz = add_size(sz, pgaio_worker_control_shmem_size());
127 :
128 4240 : return sz;
129 : }
130 :
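/*
 * IoMethodOps->shmem_init callback: create the submission queue and worker
 * control structures in shared memory, initializing them only if they were
 * newly allocated.
 */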
131 : static void
132 2270 : pgaio_worker_shmem_init(bool first_time)
133 : {
134 : bool found;
135 : int queue_size;
136 :
137 2270 : io_worker_submission_queue =
138 2270 : ShmemInitStruct("AioWorkerSubmissionQueue",
139 : pgaio_worker_queue_shmem_size(&queue_size),
140 : &found);
141 2270 : if (!found)
142 : {
143 2270 : io_worker_submission_queue->size = queue_size;
144 2270 : io_worker_submission_queue->head = 0;
145 2270 : io_worker_submission_queue->tail = 0;
146 : }
147 :
148 2270 : io_worker_control =
149 2270 : ShmemInitStruct("AioWorkerControl",
150 : pgaio_worker_control_shmem_size(),
151 : &found);
152 2270 : if (!found)
153 : {
154 2270 : io_worker_control->idle_worker_mask = 0;
155 74910 : for (int i = 0; i < MAX_IO_WORKERS; ++i)
156 : {
157 72640 : io_worker_control->workers[i].latch = NULL;
158 72640 : io_worker_control->workers[i].in_use = false;
159 : }
160 : }
161 2270 : }
162 :
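/*
 * Pick an idle worker and mark it busy by clearing its bit in
 * idle_worker_mask.  Returns the worker's index, or -1 if no worker is idle.
 * Caller must hold AioWorkerSubmissionQueueLock.
 */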
163 : static int
164 1172522 : pgaio_worker_choose_idle(void)
165 : {
166 : int worker;
167 :
168 1172522 : if (io_worker_control->idle_worker_mask == 0)
169 18286 : return -1;
170 :
171 : /* Find the lowest bit position, and clear it. */
172 1154236 : worker = pg_rightmost_one_pos64(io_worker_control->idle_worker_mask);
173 1154236 : io_worker_control->idle_worker_mask &= ~(UINT64_C(1) << worker);
174 : Assert(io_worker_control->workers[worker].in_use);
175 :
176 1154236 : return worker;
177 : }
178 :
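/*
 * Push an IO onto the submission queue.  Returns false if the queue is full.
 * Caller must hold AioWorkerSubmissionQueueLock.
 */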
179 : static bool
180 1143866 : pgaio_worker_submission_queue_insert(PgAioHandle *ioh)
181 : {
182 : PgAioWorkerSubmissionQueue *queue;
183 : uint32 new_head;
184 :
185 1143866 : queue = io_worker_submission_queue;
186 1143866 : new_head = (queue->head + 1) & (queue->size - 1);
187 1143866 : if (new_head == queue->tail)
188 : {
189 0 : pgaio_debug(DEBUG3, "io queue is full, at %u elements",
190 : io_worker_submission_queue->size);
191 0 : return false; /* full */
192 : }
193 :
194 1143866 : queue->sqes[queue->head] = pgaio_io_get_id(ioh);
195 1143866 : queue->head = new_head;
196 :
197 1143866 : return true;
198 : }
199 :
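/*
 * Pop the next IO handle index from the submission queue, or return -1 if
 * the queue is empty.  Caller must hold AioWorkerSubmissionQueueLock.
 */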
200 : static int
201 1886216 : pgaio_worker_submission_queue_consume(void)
202 : {
203 : PgAioWorkerSubmissionQueue *queue;
204 : int result;
205 :
206 1886216 : queue = io_worker_submission_queue;
207 1886216 : if (queue->tail == queue->head)
208 953214 : return -1; /* empty */
209 :
210 933002 : result = queue->sqes[queue->tail];
211 933002 : queue->tail = (queue->tail + 1) & (queue->size - 1);
212 :
213 933002 : return result;
214 : }
215 :
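/*
 * Number of IOs currently waiting in the submission queue.  Caller must hold
 * AioWorkerSubmissionQueueLock.
 */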
216 : static uint32
217 1863754 : pgaio_worker_submission_queue_depth(void)
218 : {
219 : uint32 head;
220 : uint32 tail;
221 :
222 1863754 : head = io_worker_submission_queue->head;
223 1863754 : tail = io_worker_submission_queue->tail;
224 :
225 1863754 : if (tail > head)
226 1038 : head += io_worker_submission_queue->size;
227 :
228 : Assert(head >= tail);
229 :
230 1863754 : return head - tail;
231 : }
232 :
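/*
 * An IO cannot be handed to a worker if we are not running under the
 * postmaster (no workers exist), if it references backend-local memory, or
 * if the target file cannot be reopened in another process.  Such IOs are
 * executed synchronously by the submitting backend instead.
 */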
233 : static bool
234 1153312 : pgaio_worker_needs_synchronous_execution(PgAioHandle *ioh)
235 : {
236 : return
237 1153312 : !IsUnderPostmaster
238 1146170 : || ioh->flags & PGAIO_HF_REFERENCES_LOCAL
239 2299482 : || !pgaio_io_can_reopen(ioh);
240 : }
241 :
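/*
 * Push the staged IOs onto the submission queue and wake one idle worker,
 * which in turn fans out further wakeups (see IoWorkerMain).  Any IOs that
 * don't fit into the queue are executed synchronously after the rest have
 * been handed off, to maximize concurrency.
 */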
242 : static void
243 1142734 : pgaio_worker_submit_internal(int num_staged_ios, PgAioHandle **staged_ios)
244 : {
245 : PgAioHandle *synchronous_ios[PGAIO_SUBMIT_BATCH_SIZE];
246 1142734 : int nsync = 0;
247 1142734 : Latch *wakeup = NULL;
248 : int worker;
249 :
250 : Assert(num_staged_ios <= PGAIO_SUBMIT_BATCH_SIZE);
251 :
252 1142734 : LWLockAcquire(AioWorkerSubmissionQueueLock, LW_EXCLUSIVE);
253 2286600 : for (int i = 0; i < num_staged_ios; ++i)
254 : {
255 : Assert(!pgaio_worker_needs_synchronous_execution(staged_ios[i]));
256 1143866 : if (!pgaio_worker_submission_queue_insert(staged_ios[i]))
257 : {
258 : /*
259 : * We'll do it synchronously, but only after we've sent as many as
260 : * we can to workers, to maximize concurrency.
261 : */
262 0 : synchronous_ios[nsync++] = staged_ios[i];
263 0 : continue;
264 : }
265 :
266 1143866 : if (wakeup == NULL)
267 : {
268 : /* Choose an idle worker to wake up if we haven't already. */
269 1142744 : worker = pgaio_worker_choose_idle();
270 1142744 : if (worker >= 0)
271 1134090 : wakeup = io_worker_control->workers[worker].latch;
272 :
273 1142744 : pgaio_debug_io(DEBUG4, staged_ios[i],
274 : "choosing worker %d",
275 : worker);
276 : }
277 : }
278 1142734 : LWLockRelease(AioWorkerSubmissionQueueLock);
279 :
280 1142734 : if (wakeup)
281 1134090 : SetLatch(wakeup);
282 :
283 : /* Run whatever is left synchronously. */
284 1142734 : if (nsync > 0)
285 : {
286 0 : for (int i = 0; i < nsync; ++i)
287 : {
288 0 : pgaio_io_perform_synchronously(synchronous_ios[i]);
289 : }
290 : }
291 1142734 : }
292 :
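/*
 * IoMethodOps->submit callback: mark the staged IOs as submitted, then hand
 * them off via the submission queue.
 */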
293 : static int
294 1142734 : pgaio_worker_submit(uint16 num_staged_ios, PgAioHandle **staged_ios)
295 : {
296 2286600 : for (int i = 0; i < num_staged_ios; i++)
297 : {
298 1143866 : PgAioHandle *ioh = staged_ios[i];
299 :
300 1143866 : pgaio_io_prepare_submit(ioh);
301 : }
302 :
303 1142734 : pgaio_worker_submit_internal(num_staged_ios, staged_ios);
304 :
305 1142734 : return num_staged_ios;
306 : }
307 :
308 : /*
309 : * on_shmem_exit() callback that releases the worker's slot in
310 : * io_worker_control.
311 : */
312 : static void
313 3492 : pgaio_worker_die(int code, Datum arg)
314 : {
315 3492 : LWLockAcquire(AioWorkerSubmissionQueueLock, LW_EXCLUSIVE);
316 : Assert(io_worker_control->workers[MyIoWorkerId].in_use);
317 : Assert(io_worker_control->workers[MyIoWorkerId].latch == MyLatch);
318 :
319 3492 : io_worker_control->idle_worker_mask &= ~(UINT64_C(1) << MyIoWorkerId);
320 3492 : io_worker_control->workers[MyIoWorkerId].in_use = false;
321 3492 : io_worker_control->workers[MyIoWorkerId].latch = NULL;
322 3492 : LWLockRelease(AioWorkerSubmissionQueueLock);
323 3492 : }
324 :
325 : /*
326 : * Register the worker in shared memory, assign MyIoWorkerId and register a
327 : * shutdown callback to release registration.
328 : */
329 : static void
330 3492 : pgaio_worker_register(void)
331 : {
332 3492 : MyIoWorkerId = -1;
333 :
334 : /*
335 : * XXX: This could do with more fine-grained locking. But it's also not
336 : * very common for the number of workers to change at the moment...
337 : */
338 3492 : LWLockAcquire(AioWorkerSubmissionQueueLock, LW_EXCLUSIVE);
339 :
340 7918 : for (int i = 0; i < MAX_IO_WORKERS; ++i)
341 : {
342 7918 : if (!io_worker_control->workers[i].in_use)
343 : {
344 : Assert(io_worker_control->workers[i].latch == NULL);
345 3492 : io_worker_control->workers[i].in_use = true;
346 3492 : MyIoWorkerId = i;
347 3492 : break;
348 : }
349 : else
350 : Assert(io_worker_control->workers[i].latch != NULL);
351 : }
352 :
353 3492 : if (MyIoWorkerId == -1)
354 0 : elog(ERROR, "couldn't find a free worker slot");
355 :
356 3492 : io_worker_control->idle_worker_mask |= (UINT64_C(1) << MyIoWorkerId);
357 3492 : io_worker_control->workers[MyIoWorkerId].latch = MyLatch;
358 3492 : LWLockRelease(AioWorkerSubmissionQueueLock);
359 :
360 3492 : on_shmem_exit(pgaio_worker_die, 0);
361 3492 : }
362 :
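/*
 * Error context callback, reporting the backend on whose behalf the IO
 * worker was executing an IO when an error was raised.
 */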
363 : static void
364 2254 : pgaio_worker_error_callback(void *arg)
365 : {
366 : ProcNumber owner;
367 : PGPROC *owner_proc;
368 : int32 owner_pid;
369 2254 : PgAioHandle *ioh = arg;
370 :
371 2254 : if (!ioh)
372 0 : return;
373 :
374 : Assert(ioh->owner_procno != MyProcNumber);
375 : Assert(MyBackendType == B_IO_WORKER);
376 :
377 2254 : owner = ioh->owner_procno;
378 2254 : owner_proc = GetPGProcByNumber(owner);
379 2254 : owner_pid = owner_proc->pid;
380 :
381 2254 : errcontext("I/O worker executing I/O on behalf of process %d", owner_pid);
382 : }
383 :
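/*
 * Entry point and main loop of an IO worker process.  A worker repeatedly
 * consumes one IO from the shared submission queue, executes it with a
 * synchronous system call, and runs completion processing.  While the queue
 * has further entries, it wakes up to IO_WORKER_WAKEUP_FANOUT additional
 * idle workers; when the queue is empty, it marks itself idle and sleeps on
 * its latch.
 */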
384 : void
385 3492 : IoWorkerMain(const void *startup_data, size_t startup_data_len)
386 : {
387 : sigjmp_buf local_sigjmp_buf;
388 3492 : PgAioHandle *volatile error_ioh = NULL;
389 3492 : ErrorContextCallback errcallback = {0};
390 3492 : volatile int error_errno = 0;
391 : char cmd[128];
392 :
393 3492 : AuxiliaryProcessMainCommon();
394 :
395 3492 : pqsignal(SIGHUP, SignalHandlerForConfigReload);
396 3492 : pqsignal(SIGINT, die); /* to allow manually triggering worker restart */
397 :
398 : /*
399 : * Ignore SIGTERM, will get explicit shutdown via SIGUSR2 later in the
400 : * shutdown sequence, similar to checkpointer.
401 : */
402 3492 : pqsignal(SIGTERM, SIG_IGN);
403 : /* SIGQUIT handler was already set up by InitPostmasterChild */
404 3492 : pqsignal(SIGALRM, SIG_IGN);
405 3492 : pqsignal(SIGPIPE, SIG_IGN);
406 3492 : pqsignal(SIGUSR1, procsignal_sigusr1_handler);
407 3492 : pqsignal(SIGUSR2, SignalHandlerForShutdownRequest);
408 :
409 : /* also registers a shutdown callback to unregister */
410 3492 : pgaio_worker_register();
411 :
412 3492 : sprintf(cmd, "%d", MyIoWorkerId);
413 3492 : set_ps_display(cmd);
414 :
415 3492 : errcallback.callback = pgaio_worker_error_callback;
416 3492 : errcallback.previous = error_context_stack;
417 3492 : error_context_stack = &errcallback;
418 :
419 : /* see PostgresMain() */
420 3492 : if (sigsetjmp(local_sigjmp_buf, 1) != 0)
421 : {
422 2 : error_context_stack = NULL;
423 2 : HOLD_INTERRUPTS();
424 :
425 2 : EmitErrorReport();
426 :
427 : /*
428 : * In the - very unlikely - case that the IO failed in a way that
429 : * raises an error we need to mark the IO as failed.
430 : *
431 : * Need to do just enough error recovery so that we can mark the IO as
432 : * failed and then exit (postmaster will start a new worker).
433 : */
434 2 : LWLockReleaseAll();
435 :
436 2 : if (error_ioh != NULL)
437 : {
438 : /* should never fail without setting error_errno */
439 : Assert(error_errno != 0);
440 :
441 2 : errno = error_errno;
442 :
443 2 : START_CRIT_SECTION();
444 2 : pgaio_io_process_completion(error_ioh, -error_errno);
445 2 : END_CRIT_SECTION();
446 : }
447 :
448 2 : proc_exit(1);
449 : }
450 :
451 : /* We can now handle ereport(ERROR) */
452 3492 : PG_exception_stack = &local_sigjmp_buf;
453 :
454 3492 : sigprocmask(SIG_SETMASK, &UnBlockSig, NULL);
455 :
456 1889662 : while (!ShutdownRequestPending)
457 : {
458 : uint32 io_index;
459 : Latch *latches[IO_WORKER_WAKEUP_FANOUT];
460 1886216 : int nlatches = 0;
461 1886216 : int nwakeups = 0;
462 : int worker;
463 :
464 : /*
465 : * Try to get a job to do.
466 : *
467 : * The lwlock acquisition also provides the necessary memory barrier
468 : * to ensure that we don't see outdated data in the handle.
469 : */
470 1886216 : LWLockAcquire(AioWorkerSubmissionQueueLock, LW_EXCLUSIVE);
471 1886216 : if ((io_index = pgaio_worker_submission_queue_consume()) == -1)
472 : {
473 : /*
474 : * Nothing to do. Mark self idle.
475 : *
476 : * XXX: Invent some kind of back pressure to reduce useless
477 : * wakeups?
478 : */
479 953214 : io_worker_control->idle_worker_mask |= (UINT64_C(1) << MyIoWorkerId);
480 : }
481 : else
482 : {
483 : /* Got one. Clear idle flag. */
484 933002 : io_worker_control->idle_worker_mask &= ~(UINT64_C(1) << MyIoWorkerId);
485 :
486 : /* See if we can wake up some peers. */
487 933002 : nwakeups = Min(pgaio_worker_submission_queue_depth(),
488 : IO_WORKER_WAKEUP_FANOUT);
489 953148 : for (int i = 0; i < nwakeups; ++i)
490 : {
491 29778 : if ((worker = pgaio_worker_choose_idle()) < 0)
492 9632 : break;
493 20146 : latches[nlatches++] = io_worker_control->workers[worker].latch;
494 : }
495 : }
496 1886216 : LWLockRelease(AioWorkerSubmissionQueueLock);
497 :
498 1906362 : for (int i = 0; i < nlatches; ++i)
499 20146 : SetLatch(latches[i]);
500 :
501 1886216 : if (io_index != -1)
502 : {
503 933002 : PgAioHandle *ioh = NULL;
504 :
505 933002 : ioh = &pgaio_ctl->io_handles[io_index];
506 933002 : error_ioh = ioh;
507 933002 : errcallback.arg = ioh;
508 :
509 933002 : pgaio_debug_io(DEBUG4, ioh,
510 : "worker %d processing IO",
511 : MyIoWorkerId);
512 :
513 : /*
514 : * Prevent interrupts between pgaio_io_reopen() and
515 : * pgaio_io_perform_synchronously() that otherwise could lead to
516 : * the FD getting closed in that window.
517 : */
518 933002 : HOLD_INTERRUPTS();
519 :
520 : /*
521 : * It's very unlikely, but possible, that reopen fails. E.g. due
522 : * to memory allocations failing or file permissions changing or
523 : * such. In that case we need to fail the IO.
524 : *
525 : * There's not really a good errno we can report here.
526 : */
527 933002 : error_errno = ENOENT;
528 933002 : pgaio_io_reopen(ioh);
529 :
530 : /*
531 : * To be able to exercise the reopen-fails path, allow injection
532 : * points to trigger a failure at this point.
533 : */
534 933002 : INJECTION_POINT("aio-worker-after-reopen", ioh);
535 :
536 933000 : error_errno = 0;
537 933000 : error_ioh = NULL;
538 :
539 : /*
540 : * As part of IO completion the buffer will be marked as NOACCESS,
541 : * until the buffer is pinned again - which never happens in io
542 : * workers. Therefore the next time there is IO for the same
543 : * buffer, the memory will be considered inaccessible. To avoid
544 : * that, explicitly allow access to the memory before reading data
545 : * into it.
546 : */
547 : #ifdef USE_VALGRIND
548 : {
549 : struct iovec *iov;
550 : uint16 iov_length = pgaio_io_get_iovec_length(ioh, &iov);
551 :
552 : for (int i = 0; i < iov_length; i++)
553 : VALGRIND_MAKE_MEM_UNDEFINED(iov[i].iov_base, iov[i].iov_len);
554 : }
555 : #endif
556 :
557 : /*
558 : * We don't expect this to ever fail with ERROR or FATAL, no need
559 : * to keep error_ioh set to the IO.
560 : * pgaio_io_perform_synchronously() contains a critical section to
561 : * ensure we don't accidentally fail.
562 : */
563 933000 : pgaio_io_perform_synchronously(ioh);
564 :
565 933000 : RESUME_INTERRUPTS();
566 933000 : errcallback.arg = NULL;
567 : }
568 : else
569 : {
570 953214 : WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, -1,
571 : WAIT_EVENT_IO_WORKER_MAIN);
572 953178 : ResetLatch(MyLatch);
573 : }
574 :
575 1886178 : CHECK_FOR_INTERRUPTS();
576 :
577 1886170 : if (ConfigReloadPending)
578 : {
579 504 : ConfigReloadPending = false;
580 504 : ProcessConfigFile(PGC_SIGHUP);
581 : }
582 : }
583 :
584 3446 : error_context_stack = errcallback.previous;
585 3446 : proc_exit(0);
586 : }
587 :
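/* Return true if io_method is set to "worker". */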
588 : bool
589 278396 : pgaio_workers_enabled(void)
590 : {
591 278396 : return io_method == IOMETHOD_WORKER;
592 : }