Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * checkpointer.c
4 : *
5 : * The checkpointer is new as of Postgres 9.2. It handles all checkpoints.
6 : * Checkpoints are automatically dispatched after a certain amount of time has
7 : * elapsed since the last one, and the checkpointer can be signaled to
8 : * perform requested checkpoints as well. (The GUC parameter that mandates
9 : * a checkpoint every so many WAL segments is implemented by having backends
10 : * signal when they fill WAL segments; the checkpointer itself doesn't watch
11 : * for the condition.)
12 : *
13 : * The normal termination sequence is that the checkpointer is instructed by
14 : * SIGINT to execute the shutdown checkpoint. After that, the checkpointer
15 : * waits for SIGUSR2, which instructs it to exit(0).
16 : * All backends must be stopped before SIGINT or SIGUSR2 is issued!
17 : *
18 : * Emergency termination is by SIGQUIT; like any backend, the checkpointer
19 : * will simply abort and exit on SIGQUIT.
20 : *
21 : * If the checkpointer exits unexpectedly, the postmaster treats that the same
22 : * as a backend crash: shared memory may be corrupted, so remaining backends
23 : * should be killed by SIGQUIT and then a recovery cycle started. (Even if
24 : * shared memory isn't corrupted, we have lost information about which
25 : * files need to be fsync'd for the next checkpoint, and so a system
26 : * restart needs to be forced.)
27 : *
28 : *
29 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
30 : *
31 : *
32 : * IDENTIFICATION
33 : * src/backend/postmaster/checkpointer.c
34 : *
35 : *-------------------------------------------------------------------------
36 : */
37 : #include "postgres.h"
38 :
39 : #include <sys/time.h>
40 : #include <time.h>
41 :
42 : #include "access/xlog.h"
43 : #include "access/xlog_internal.h"
44 : #include "access/xlogrecovery.h"
45 : #include "catalog/pg_authid.h"
46 : #include "commands/defrem.h"
47 : #include "libpq/pqsignal.h"
48 : #include "miscadmin.h"
49 : #include "pgstat.h"
50 : #include "postmaster/auxprocess.h"
51 : #include "postmaster/bgwriter.h"
52 : #include "postmaster/interrupt.h"
53 : #include "replication/syncrep.h"
54 : #include "storage/aio_subsys.h"
55 : #include "storage/bufmgr.h"
56 : #include "storage/condition_variable.h"
57 : #include "storage/fd.h"
58 : #include "storage/ipc.h"
59 : #include "storage/lwlock.h"
60 : #include "storage/pmsignal.h"
61 : #include "storage/proc.h"
62 : #include "storage/procsignal.h"
63 : #include "storage/shmem.h"
64 : #include "storage/smgr.h"
65 : #include "storage/spin.h"
66 : #include "utils/acl.h"
67 : #include "utils/guc.h"
68 : #include "utils/memutils.h"
69 : #include "utils/resowner.h"
70 :
71 :
72 : /*----------
73 : * Shared memory area for communication between checkpointer and backends
74 : *
75 : * The ckpt counters allow backends to watch for completion of a checkpoint
76 : * request they send. Here's how it works:
77 : * * At start of a checkpoint, checkpointer reads (and clears) the request
78 : * flags and increments ckpt_started, while holding ckpt_lck.
79 : * * On completion of a checkpoint, checkpointer sets ckpt_done to
80 : * equal ckpt_started.
81 : * * On failure of a checkpoint, checkpointer increments ckpt_failed
82 : * and sets ckpt_done to equal ckpt_started.
83 : *
84 : * The algorithm for backends is:
85 : * 1. Record current values of ckpt_failed and ckpt_started, and
86 : * set request flags, while holding ckpt_lck.
87 : * 2. Send signal to request checkpoint.
88 : * 3. Sleep until ckpt_started changes. Now you know a checkpoint has
89 : * begun since you started this algorithm (although *not* that it was
90 : * specifically initiated by your signal), and that it is using your flags.
91 : * 4. Record new value of ckpt_started.
92 : * 5. Sleep until ckpt_done >= saved value of ckpt_started. (Use modulo
93 : * arithmetic here in case counters wrap around.) Now you know a
94 : * checkpoint has started and completed, but not whether it was
95 : * successful.
96 : * 6. If ckpt_failed is different from the originally saved value,
97 : * assume request failed; otherwise it was definitely successful.
98 : *
99 : * ckpt_flags holds the OR of the checkpoint request flags sent by all
100 : * requesting backends since the last checkpoint start. The flags are
101 : * chosen so that OR'ing is the correct way to combine multiple requests.
102 : *
103 : * The requests array holds fsync requests sent by backends and not yet
104 : * absorbed by the checkpointer.
105 : *
106 : * Unlike the checkpoint fields, the request-related fields are protected
107 : * by CheckpointerCommLock.
108 : *----------
109 : */
110 : typedef struct
111 : {
112 : SyncRequestType type; /* request type */
113 : FileTag ftag; /* file identifier */
114 : } CheckpointerRequest;
115 :
116 : typedef struct
117 : {
118 : pid_t checkpointer_pid; /* PID (0 if not started) */
119 :
120 : slock_t ckpt_lck; /* protects all the ckpt_* fields */
121 :
122 : int ckpt_started; /* advances when checkpoint starts */
123 : int ckpt_done; /* advances when checkpoint done */
124 : int ckpt_failed; /* advances when checkpoint fails */
125 :
126 : int ckpt_flags; /* checkpoint flags, as defined in xlog.h */
127 :
128 : ConditionVariable start_cv; /* signaled when ckpt_started advances */
129 : ConditionVariable done_cv; /* signaled when ckpt_done advances */
130 :
131 : int num_requests; /* current # of requests */
132 : int max_requests; /* allocated array size */
133 : CheckpointerRequest requests[FLEXIBLE_ARRAY_MEMBER];
134 : } CheckpointerShmemStruct;
135 :
136 : static CheckpointerShmemStruct *CheckpointerShmem;
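
/*
 * Illustrative sketch, not part of the original file: the "modulo
 * arithmetic" comparison from step 5 of the protocol comment above.
 * Because ckpt_started and ckpt_done are plain ints that may wrap around,
 * a waiter must not compare them with ">=" directly; instead it tests the
 * sign of the difference, exactly as RequestCheckpoint() does below with
 * "new_done - new_started >= 0".
 */
static inline bool
ckpt_done_has_reached(int done, int saved_started)
{
    /* wraparound-safe while the counters stay within INT_MAX of each other */
    return (done - saved_started) >= 0;
}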
137 :
138 : /* interval for calling AbsorbSyncRequests in CheckpointWriteDelay */
139 : #define WRITES_PER_ABSORB 1000
140 :
141 : /*
142 : * GUC parameters
143 : */
144 : int CheckPointTimeout = 300;
145 : int CheckPointWarning = 30;
146 : double CheckPointCompletionTarget = 0.9;
147 :
148 : /*
149 : * Private state
150 : */
151 : static bool ckpt_active = false;
152 : static volatile sig_atomic_t ShutdownXLOGPending = false;
153 :
154 : /* these values are valid when ckpt_active is true: */
155 : static pg_time_t ckpt_start_time;
156 : static XLogRecPtr ckpt_start_recptr;
157 : static double ckpt_cached_elapsed;
158 :
159 : static pg_time_t last_checkpoint_time;
160 : static pg_time_t last_xlog_switch_time;
161 :
162 : /* Prototypes for private functions */
163 :
164 : static void ProcessCheckpointerInterrupts(void);
165 : static void CheckArchiveTimeout(void);
166 : static bool IsCheckpointOnSchedule(double progress);
167 : static bool FastCheckpointRequested(void);
168 : static bool CompactCheckpointerRequestQueue(void);
169 : static void UpdateSharedMemoryConfig(void);
170 :
171 : /* Signal handlers */
172 : static void ReqShutdownXLOG(SIGNAL_ARGS);
173 :
174 :
175 : /*
176 : * Main entry point for checkpointer process
177 : *
178 : * This is invoked from AuxiliaryProcessMain, which has already created the
179 : * basic execution environment, but not enabled signals yet.
180 : */
181 : void
182 1026 : CheckpointerMain(const void *startup_data, size_t startup_data_len)
183 : {
184 : sigjmp_buf local_sigjmp_buf;
185 : MemoryContext checkpointer_context;
186 :
187 : Assert(startup_data_len == 0);
188 :
189 1026 : MyBackendType = B_CHECKPOINTER;
190 1026 : AuxiliaryProcessMainCommon();
191 :
192 1026 : CheckpointerShmem->checkpointer_pid = MyProcPid;
193 :
194 : /*
195 : * Properly accept or ignore signals the postmaster might send us
196 : *
197 : * Note: we deliberately ignore SIGTERM, because during a standard Unix
198 : * system shutdown cycle, init will SIGTERM all processes at once. We
199 : * want to wait for the backends to exit, whereupon the postmaster will
200 : * tell us it's okay to shut down (via SIGUSR2).
201 : */
202 1026 : pqsignal(SIGHUP, SignalHandlerForConfigReload);
203 1026 : pqsignal(SIGINT, ReqShutdownXLOG);
204 1026 : pqsignal(SIGTERM, SIG_IGN); /* ignore SIGTERM */
205 : /* SIGQUIT handler was already set up by InitPostmasterChild */
206 1026 : pqsignal(SIGALRM, SIG_IGN);
207 1026 : pqsignal(SIGPIPE, SIG_IGN);
208 1026 : pqsignal(SIGUSR1, procsignal_sigusr1_handler);
209 1026 : pqsignal(SIGUSR2, SignalHandlerForShutdownRequest);
210 :
211 : /*
212 : * Reset some signals that are accepted by postmaster but not here
213 : */
214 1026 : pqsignal(SIGCHLD, SIG_DFL);
215 :
216 : /*
217 : * Initialize so that the first time-driven event happens at the correct time.
218 : */
219 1026 : last_checkpoint_time = last_xlog_switch_time = (pg_time_t) time(NULL);
220 :
221 : /*
222 : * Write out stats after shutdown. This needs to be called by exactly one
223 : * process during a normal shutdown, and since checkpointer is shut down
224 : * very late...
225 : *
226 : * While e.g. walsenders are still active after the shutdown checkpoint
227 : * has been written (and thus could produce more stats), the checkpointer
228 : * stays around even longer. The postmaster only signals the checkpointer
229 : * to exit after all processes that could emit stats have been shut
230 : * down.
231 : */
232 1026 : before_shmem_exit(pgstat_before_server_shutdown, 0);
233 :
234 : /*
235 : * Create a memory context that we will do all our work in. We do this so
236 : * that we can reset the context during error recovery and thereby avoid
237 : * possible memory leaks. Formerly this code just ran in
238 : * TopMemoryContext, but resetting that would be a really bad idea.
239 : */
240 1026 : checkpointer_context = AllocSetContextCreate(TopMemoryContext,
241 : "Checkpointer",
242 : ALLOCSET_DEFAULT_SIZES);
243 1026 : MemoryContextSwitchTo(checkpointer_context);
244 :
245 : /*
246 : * If an exception is encountered, processing resumes here.
247 : *
248 : * You might wonder why this isn't coded as an infinite loop around a
249 : * PG_TRY construct. The reason is that this is the bottom of the
250 : * exception stack, and so with PG_TRY there would be no exception handler
251 : * in force at all during the CATCH part. By leaving the outermost setjmp
252 : * always active, we have at least some chance of recovering from an error
253 : * during error recovery. (If we get into an infinite loop thereby, it
254 : * will soon be stopped by overflow of elog.c's internal state stack.)
255 : *
256 : * Note that we use sigsetjmp(..., 1), so that the prevailing signal mask
257 : * (to wit, BlockSig) will be restored when longjmp'ing to here. Thus,
258 : * signals other than SIGQUIT will be blocked until we complete error
259 : * recovery. It might seem that this policy makes the HOLD_INTERRUPTS()
260 : * call redundant, but it is not since InterruptPending might be set
261 : * already.
262 : */
263 1026 : if (sigsetjmp(local_sigjmp_buf, 1) != 0)
264 : {
265 : /* Since not using PG_TRY, must reset error stack by hand */
266 0 : error_context_stack = NULL;
267 :
268 : /* Prevent interrupts while cleaning up */
269 0 : HOLD_INTERRUPTS();
270 :
271 : /* Report the error to the server log */
272 0 : EmitErrorReport();
273 :
274 : /*
275 : * These operations are really just a minimal subset of
276 : * AbortTransaction(). We don't have very many resources to worry
277 : * about in checkpointer, but we do have LWLocks, buffers, and temp
278 : * files.
279 : */
280 0 : LWLockReleaseAll();
281 0 : ConditionVariableCancelSleep();
282 0 : pgstat_report_wait_end();
283 0 : pgaio_error_cleanup();
284 0 : UnlockBuffers();
285 0 : ReleaseAuxProcessResources(false);
286 0 : AtEOXact_Buffers(false);
287 0 : AtEOXact_SMgr();
288 0 : AtEOXact_Files(false);
289 0 : AtEOXact_HashTables(false);
290 :
291 : /* Warn any waiting backends that the checkpoint failed. */
292 0 : if (ckpt_active)
293 : {
294 0 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
295 0 : CheckpointerShmem->ckpt_failed++;
296 0 : CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
297 0 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
298 :
299 0 : ConditionVariableBroadcast(&CheckpointerShmem->done_cv);
300 :
301 0 : ckpt_active = false;
302 : }
303 :
304 : /*
305 : * Now return to normal top-level context and clear ErrorContext for
306 : * next time.
307 : */
308 0 : MemoryContextSwitchTo(checkpointer_context);
309 0 : FlushErrorState();
310 :
311 : /* Flush any leaked data in the top-level context */
312 0 : MemoryContextReset(checkpointer_context);
313 :
314 : /* Now we can allow interrupts again */
315 0 : RESUME_INTERRUPTS();
316 :
317 : /*
318 : * Sleep at least 1 second after any error. A write error is likely
319 : * to be repeated, and we don't want to be filling the error logs as
320 : * fast as we can.
321 : */
322 0 : pg_usleep(1000000L);
323 : }
324 :
325 : /* We can now handle ereport(ERROR) */
326 1026 : PG_exception_stack = &local_sigjmp_buf;
327 :
328 : /*
329 : * Unblock signals (they were blocked when the postmaster forked us)
330 : */
331 1026 : sigprocmask(SIG_SETMASK, &UnBlockSig, NULL);
332 :
333 : /*
334 : * Ensure all shared memory values are set correctly for the config. Doing
335 : * this here ensures no race conditions from other concurrent updaters.
336 : */
337 1026 : UpdateSharedMemoryConfig();
338 :
339 : /*
340 : * Advertise our proc number that backends can use to wake us up while
341 : * we're sleeping.
342 : */
343 1026 : ProcGlobal->checkpointerProc = MyProcNumber;
344 :
345 : /*
346 : * Loop until we've been asked to write the shutdown checkpoint or
347 : * terminate.
348 : */
349 : for (;;)
350 7156 : {
351 8182 : bool do_checkpoint = false;
352 8182 : int flags = 0;
353 : pg_time_t now;
354 : int elapsed_secs;
355 : int cur_timeout;
356 8182 : bool chkpt_or_rstpt_requested = false;
357 8182 : bool chkpt_or_rstpt_timed = false;
358 :
359 : /* Clear any already-pending wakeups */
360 8182 : ResetLatch(MyLatch);
361 :
362 : /*
363 : * Process any requests or signals received recently.
364 : */
365 8182 : AbsorbSyncRequests();
366 :
367 8182 : ProcessCheckpointerInterrupts();
368 8182 : if (ShutdownXLOGPending || ShutdownRequestPending)
369 : break;
370 :
371 : /*
372 : * Detect a pending checkpoint request by checking whether the flags
373 : * word in shared memory is nonzero. We shouldn't need to acquire the
374 : * ckpt_lck for this.
375 : */
376 7178 : if (((volatile CheckpointerShmemStruct *) CheckpointerShmem)->ckpt_flags)
377 : {
378 2500 : do_checkpoint = true;
379 2500 : chkpt_or_rstpt_requested = true;
380 : }
381 :
382 : /*
383 : * Force a checkpoint if too much time has elapsed since the last one.
384 : * Note that we count a timed checkpoint in stats only when this
385 : * occurs without an external request, but we set the CAUSE_TIME flag
386 : * bit even if there is also an external request.
387 : */
388 7178 : now = (pg_time_t) time(NULL);
389 7178 : elapsed_secs = now - last_checkpoint_time;
390 7178 : if (elapsed_secs >= CheckPointTimeout)
391 : {
392 6 : if (!do_checkpoint)
393 6 : chkpt_or_rstpt_timed = true;
394 6 : do_checkpoint = true;
395 6 : flags |= CHECKPOINT_CAUSE_TIME;
396 : }
397 :
398 : /*
399 : * Do a checkpoint if requested.
400 : */
401 7178 : if (do_checkpoint)
402 : {
403 2506 : bool ckpt_performed = false;
404 : bool do_restartpoint;
405 :
406 : /* Check if we should perform a checkpoint or a restartpoint. */
407 2506 : do_restartpoint = RecoveryInProgress();
408 :
409 : /*
410 : * Atomically fetch the request flags to figure out what kind of a
411 : * checkpoint we should perform, and increase the started-counter
412 : * to acknowledge that we've started a new checkpoint.
413 : */
414 2506 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
415 2506 : flags |= CheckpointerShmem->ckpt_flags;
416 2506 : CheckpointerShmem->ckpt_flags = 0;
417 2506 : CheckpointerShmem->ckpt_started++;
418 2506 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
419 :
420 2506 : ConditionVariableBroadcast(&CheckpointerShmem->start_cv);
421 :
422 : /*
423 : * The end-of-recovery checkpoint is a real checkpoint that's
424 : * performed while we're still in recovery.
425 : */
426 2506 : if (flags & CHECKPOINT_END_OF_RECOVERY)
427 40 : do_restartpoint = false;
428 :
429 2506 : if (chkpt_or_rstpt_timed)
430 : {
431 6 : chkpt_or_rstpt_timed = false;
432 6 : if (do_restartpoint)
433 0 : PendingCheckpointerStats.restartpoints_timed++;
434 : else
435 6 : PendingCheckpointerStats.num_timed++;
436 : }
437 :
438 2506 : if (chkpt_or_rstpt_requested)
439 : {
440 2500 : chkpt_or_rstpt_requested = false;
441 2500 : if (do_restartpoint)
442 1052 : PendingCheckpointerStats.restartpoints_requested++;
443 : else
444 1448 : PendingCheckpointerStats.num_requested++;
445 : }
446 :
447 : /*
448 : * We will warn if (a) it's too soon since the last checkpoint (whatever
449 : * caused it) and (b) somebody set the CHECKPOINT_CAUSE_XLOG flag
450 : * since the last checkpoint start. Note in particular that this
451 : * implementation will not generate warnings caused by
452 : * CheckPointTimeout < CheckPointWarning.
453 : */
454 2506 : if (!do_restartpoint &&
455 1454 : (flags & CHECKPOINT_CAUSE_XLOG) &&
456 378 : elapsed_secs < CheckPointWarning)
457 378 : ereport(LOG,
458 : (errmsg_plural("checkpoints are occurring too frequently (%d second apart)",
459 : "checkpoints are occurring too frequently (%d seconds apart)",
460 : elapsed_secs,
461 : elapsed_secs),
462 : errhint("Consider increasing the configuration parameter \"%s\".", "max_wal_size")));
463 :
464 : /*
465 : * Initialize checkpointer-private variables used during
466 : * checkpoint.
467 : */
468 2506 : ckpt_active = true;
469 2506 : if (do_restartpoint)
470 1052 : ckpt_start_recptr = GetXLogReplayRecPtr(NULL);
471 : else
472 1454 : ckpt_start_recptr = GetInsertRecPtr();
473 2506 : ckpt_start_time = now;
474 2506 : ckpt_cached_elapsed = 0;
475 :
476 : /*
477 : * Do the checkpoint.
478 : */
479 2506 : if (!do_restartpoint)
480 1454 : ckpt_performed = CreateCheckPoint(flags);
481 : else
482 1052 : ckpt_performed = CreateRestartPoint(flags);
483 :
484 : /*
485 : * After any checkpoint, free all smgr objects. Otherwise we
486 : * would never do so for dropped relations, as the checkpointer
487 : * does not process shared invalidation messages or call
488 : * AtEOXact_SMgr().
489 : */
490 2506 : smgrdestroyall();
491 :
492 : /*
493 : * Indicate checkpoint completion to any waiting backends.
494 : */
495 2506 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
496 2506 : CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
497 2506 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
498 :
499 2506 : ConditionVariableBroadcast(&CheckpointerShmem->done_cv);
500 :
501 2506 : if (!do_restartpoint)
502 : {
503 : /*
504 : * Note we record the checkpoint start time not end time as
505 : * last_checkpoint_time. This is so that time-driven
506 : * checkpoints happen at a predictable spacing.
507 : */
508 1454 : last_checkpoint_time = now;
509 :
510 1454 : if (ckpt_performed)
511 1454 : PendingCheckpointerStats.num_performed++;
512 : }
513 : else
514 : {
515 1052 : if (ckpt_performed)
516 : {
517 : /*
518 : * The same as for checkpoint. Please see the
519 : * corresponding comment.
520 : */
521 334 : last_checkpoint_time = now;
522 :
523 334 : PendingCheckpointerStats.restartpoints_performed++;
524 : }
525 : else
526 : {
527 : /*
528 : * We were not able to perform the restartpoint
529 : * (checkpoints throw an ERROR in case of error). Most
530 : * likely because we have not received any new checkpoint
531 : * WAL records since the last restartpoint. Try again in
532 : * 15 s.
533 : */
534 718 : last_checkpoint_time = now - CheckPointTimeout + 15;
535 : }
536 : }
537 :
538 2506 : ckpt_active = false;
539 :
540 : /*
541 : * We may have received an interrupt during the checkpoint and the
542 : * latch might have been reset (e.g. in CheckpointWriteDelay).
543 : */
544 2506 : ProcessCheckpointerInterrupts();
545 2506 : if (ShutdownXLOGPending || ShutdownRequestPending)
546 : break;
547 : }
548 :
549 : /* Check for archive_timeout and switch xlog files if necessary. */
550 7164 : CheckArchiveTimeout();
551 :
552 : /* Report pending statistics to the cumulative stats system */
553 7164 : pgstat_report_checkpointer();
554 7164 : pgstat_report_wal(true);
555 :
556 : /*
557 : * If any checkpoint flags have been set, redo the loop to handle the
558 : * checkpoint without sleeping.
559 : */
560 7164 : if (((volatile CheckpointerShmemStruct *) CheckpointerShmem)->ckpt_flags)
561 408 : continue;
562 :
563 : /*
564 : * Sleep until we are signaled or it's time for another checkpoint or
565 : * xlog file switch.
566 : */
567 6756 : now = (pg_time_t) time(NULL);
568 6756 : elapsed_secs = now - last_checkpoint_time;
569 6756 : if (elapsed_secs >= CheckPointTimeout)
570 0 : continue; /* no sleep for us ... */
571 6756 : cur_timeout = CheckPointTimeout - elapsed_secs;
572 6756 : if (XLogArchiveTimeout > 0 && !RecoveryInProgress())
573 : {
574 0 : elapsed_secs = now - last_xlog_switch_time;
575 0 : if (elapsed_secs >= XLogArchiveTimeout)
576 0 : continue; /* no sleep for us ... */
577 0 : cur_timeout = Min(cur_timeout, XLogArchiveTimeout - elapsed_secs);
578 : }
579 :
580 6756 : (void) WaitLatch(MyLatch,
581 : WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
582 : cur_timeout * 1000L /* convert to ms */ ,
583 : WAIT_EVENT_CHECKPOINTER_MAIN);
584 : }
585 :
586 : /*
587 : * From here on, elog(ERROR) should end with exit(1), not send control
588 : * back to the sigsetjmp block above.
589 : */
590 1018 : ExitOnAnyError = true;
591 :
592 1018 : if (ShutdownXLOGPending)
593 : {
594 : /*
595 : * Close down the database.
596 : *
597 : * Since ShutdownXLOG() creates a restartpoint or checkpoint, and
598 : * updates the statistics, count this as a requested checkpoint and
599 : * flush out the pending statistics.
600 : */
601 1018 : PendingCheckpointerStats.num_requested++;
602 1018 : ShutdownXLOG(0, 0);
603 1018 : pgstat_report_checkpointer();
604 1018 : pgstat_report_wal(true);
605 :
606 : /*
607 : * Tell postmaster that we're done.
608 : */
609 1018 : SendPostmasterSignal(PMSIGNAL_XLOG_IS_SHUTDOWN);
610 1018 : ShutdownXLOGPending = false;
611 : }
612 :
613 : /*
614 : * Wait until we're asked to shut down. By separating the writing of the
615 : * shutdown checkpoint from the checkpointer's exit, the checkpointer can
616 : * perform some should-be-as-late-as-possible work, like writing out stats.
617 : */
618 : for (;;)
619 : {
620 : /* Clear any already-pending wakeups */
621 2034 : ResetLatch(MyLatch);
622 :
623 2034 : ProcessCheckpointerInterrupts();
624 :
625 2034 : if (ShutdownRequestPending)
626 1018 : break;
627 :
628 1016 : (void) WaitLatch(MyLatch,
629 : WL_LATCH_SET | WL_EXIT_ON_PM_DEATH,
630 : 0,
631 : WAIT_EVENT_CHECKPOINTER_SHUTDOWN);
632 : }
633 :
634 : /* Normal exit from the checkpointer is here */
635 1018 : proc_exit(0); /* done */
636 : }
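
/*
 * Illustrative sketch, not part of the original file: the shutdown
 * handshake described in the header comment, as seen from the driving
 * side.  The real driver is the postmaster's state machine in
 * postmaster.c; this only shows the order of signals, assuming "pid" is
 * the checkpointer's PID and that all regular backends have already
 * exited.
 */
static void
sketch_drive_checkpointer_shutdown(pid_t pid)
{
    /* Step 1: SIGINT requests the shutdown checkpoint (ReqShutdownXLOG). */
    kill(pid, SIGINT);

    /*
     * Step 2: the checkpointer answers with PMSIGNAL_XLOG_IS_SHUTDOWN once
     * ShutdownXLOG() is done; the postmaster waits for that signal here.
     */

    /* Step 3: SIGUSR2 finally tells the checkpointer to exit(0). */
    kill(pid, SIGUSR2);
}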
637 :
638 : /*
639 : * Process any new interrupts.
640 : */
641 : static void
642 12722 : ProcessCheckpointerInterrupts(void)
643 : {
644 12722 : if (ProcSignalBarrierPending)
645 118 : ProcessProcSignalBarrier();
646 :
647 12722 : if (ConfigReloadPending)
648 : {
649 118 : ConfigReloadPending = false;
650 118 : ProcessConfigFile(PGC_SIGHUP);
651 :
652 : /*
653 : * Checkpointer is the last process to shut down, so we ask it to hold
654 : * the keys for a range of other required tasks, most of which have
655 : * nothing to do with checkpointing at all.
656 : *
657 : * For various reasons, some config values can change dynamically so
658 : * the primary copy of them is held in shared memory to make sure all
659 : * backends see the same value. We make Checkpointer responsible for
660 : * updating the shared memory copy if the parameter setting changes
661 : * because of SIGHUP.
662 : */
663 118 : UpdateSharedMemoryConfig();
664 : }
665 :
666 : /* Perform logging of memory contexts of this process */
667 12722 : if (LogMemoryContextPending)
668 2 : ProcessLogMemoryContextInterrupt();
669 12722 : }
670 :
671 : /*
672 : * CheckArchiveTimeout -- check for archive_timeout and switch xlog files
673 : *
674 : * This will switch to a new WAL file and force an archive file write if
675 : * meaningful activity is recorded in the current WAL file. This includes most
676 : * writes, including just a single checkpoint record, but excludes WAL records
677 : * that were inserted with the XLOG_MARK_UNIMPORTANT flag set (like
678 : * snapshots of running transactions). Such records, depending on
679 : * configuration, occur at regular intervals and don't contain important
680 : * information. This avoids generating archives containing only a few
681 : * unimportant records.
682 : */
683 : static void
684 26176 : CheckArchiveTimeout(void)
685 : {
686 : pg_time_t now;
687 : pg_time_t last_time;
688 : XLogRecPtr last_switch_lsn;
689 :
690 26176 : if (XLogArchiveTimeout <= 0 || RecoveryInProgress())
691 26176 : return;
692 :
693 0 : now = (pg_time_t) time(NULL);
694 :
695 : /* First we do a quick check using possibly-stale local state. */
696 0 : if ((int) (now - last_xlog_switch_time) < XLogArchiveTimeout)
697 0 : return;
698 :
699 : /*
700 : * Update local state ... note that last_xlog_switch_time is the last time
701 : * a switch was performed *or requested*.
702 : */
703 0 : last_time = GetLastSegSwitchData(&last_switch_lsn);
704 :
705 0 : last_xlog_switch_time = Max(last_xlog_switch_time, last_time);
706 :
707 : /* Now we can do the real checks */
708 0 : if ((int) (now - last_xlog_switch_time) >= XLogArchiveTimeout)
709 : {
710 : /*
711 : * Switch segment only when "important" WAL has been logged since the
712 : * last segment switch (last_switch_lsn points to the end of the segment
713 : * the last switch occurred in).
714 : */
715 0 : if (GetLastImportantRecPtr() > last_switch_lsn)
716 : {
717 : XLogRecPtr switchpoint;
718 :
719 : /* mark switch as unimportant, avoids triggering checkpoints */
720 0 : switchpoint = RequestXLogSwitch(true);
721 :
722 : /*
723 : * If the returned pointer points exactly to a segment boundary,
724 : * assume nothing happened.
725 : */
726 0 : if (XLogSegmentOffset(switchpoint, wal_segment_size) != 0)
727 0 : elog(DEBUG1, "write-ahead log switch forced (\"archive_timeout\"=%d)",
728 : XLogArchiveTimeout);
729 : }
730 :
731 : /*
732 : * Update state in any case, so we don't retry constantly when the
733 : * system is idle.
734 : */
735 0 : last_xlog_switch_time = now;
736 : }
737 : }
738 :
739 : /*
740 : * Returns true if a fast checkpoint request is pending. (Note that this does
741 : * not check the *current* checkpoint's FAST flag, but whether there is one
742 : * pending behind it.)
743 : */
744 : static bool
745 94758 : FastCheckpointRequested(void)
746 : {
747 94758 : volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
748 :
749 : /*
750 : * We don't need to acquire the ckpt_lck in this case because we're only
751 : * looking at a single flag bit.
752 : */
753 94758 : if (cps->ckpt_flags & CHECKPOINT_FAST)
754 7334 : return true;
755 87424 : return false;
756 : }
757 :
758 : /*
759 : * CheckpointWriteDelay -- control rate of checkpoint
760 : *
761 : * This function is called after each page write performed by BufferSync().
762 : * It is responsible for throttling BufferSync()'s write rate to hit
763 : * checkpoint_completion_target.
764 : *
765 : * The checkpoint request flags should be passed in; currently the only one
766 : * examined is CHECKPOINT_FAST, which disables delays between writes.
767 : *
768 : * 'progress' is an estimate of how much of the work has been done, as a
769 : * fraction between 0.0 meaning none, and 1.0 meaning all done.
770 : */
771 : void
772 578088 : CheckpointWriteDelay(int flags, double progress)
773 : {
774 : static int absorb_counter = WRITES_PER_ABSORB;
775 :
776 : /* Do nothing if checkpoint is being executed by non-checkpointer process */
777 578088 : if (!AmCheckpointerProcess())
778 103168 : return;
779 :
780 : /*
781 : * Perform the usual duties and take a nap, unless we're behind schedule,
782 : * in which case we just try to catch up as quickly as possible.
783 : */
784 474920 : if (!(flags & CHECKPOINT_FAST) &&
785 99968 : !ShutdownXLOGPending &&
786 94758 : !ShutdownRequestPending &&
787 182182 : !FastCheckpointRequested() &&
788 87424 : IsCheckpointOnSchedule(progress))
789 : {
790 19012 : if (ConfigReloadPending)
791 : {
792 0 : ConfigReloadPending = false;
793 0 : ProcessConfigFile(PGC_SIGHUP);
794 : /* update shmem copies of config variables */
795 0 : UpdateSharedMemoryConfig();
796 : }
797 :
798 19012 : AbsorbSyncRequests();
799 19012 : absorb_counter = WRITES_PER_ABSORB;
800 :
801 19012 : CheckArchiveTimeout();
802 :
803 : /* Report interim statistics to the cumulative stats system */
804 19012 : pgstat_report_checkpointer();
805 :
806 : /*
807 : * This sleep used to be connected to bgwriter_delay, typically 200ms.
808 : * That resulted in more frequent wakeups if not much work to do.
809 : * Checkpointer and bgwriter are no longer related so take the Big
810 : * Sleep.
811 : */
812 19012 : WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH | WL_TIMEOUT,
813 : 100,
814 : WAIT_EVENT_CHECKPOINT_WRITE_DELAY);
815 19012 : ResetLatch(MyLatch);
816 : }
817 455908 : else if (--absorb_counter <= 0)
818 : {
819 : /*
820 : * Absorb pending fsync requests after each WRITES_PER_ABSORB write
821 : * operations even when we don't sleep, to prevent overflow of the
822 : * fsync request queue.
823 : */
824 188 : AbsorbSyncRequests();
825 188 : absorb_counter = WRITES_PER_ABSORB;
826 : }
827 :
828 : /* Check for barrier events. */
829 474920 : if (ProcSignalBarrierPending)
830 10 : ProcessProcSignalBarrier();
831 : }
832 :
833 : /*
834 : * IsCheckpointOnSchedule -- are we on schedule to finish this checkpoint
835 : * (or restartpoint) in time?
836 : *
837 : * Compares the current progress against the time/segments elapsed since last
838 : * checkpoint, and returns true if the progress we've made this far is greater
839 : * than the elapsed time/segments.
840 : */
841 : static bool
842 87424 : IsCheckpointOnSchedule(double progress)
843 : {
844 : XLogRecPtr recptr;
845 : struct timeval now;
846 : double elapsed_xlogs,
847 : elapsed_time;
848 :
849 : Assert(ckpt_active);
850 :
851 : /* Scale progress according to checkpoint_completion_target. */
852 87424 : progress *= CheckPointCompletionTarget;
853 :
854 : /*
855 : * Check against the cached value first. Only do the more expensive
856 : * calculations once we reach the target previously calculated. Since
857 : * neither time nor the WAL insert pointer moves backwards, a freshly
858 : * calculated value can only be greater than or equal to the cached value.
859 : */
860 87424 : if (progress < ckpt_cached_elapsed)
861 61008 : return false;
862 :
863 : /*
864 : * Check progress against WAL segments written and CheckPointSegments.
865 : *
866 : * We compare the current WAL insert location against the location
867 : * computed before calling CreateCheckPoint. The code in XLogInsert that
868 : * actually triggers a checkpoint when CheckPointSegments is exceeded
869 : * compares against RedoRecPtr, so this is not completely accurate.
870 : * However, it's good enough for our purposes, we're only calculating an
871 : * estimate anyway.
872 : *
873 : * During recovery, we compare last replayed WAL record's location with
874 : * the location computed before calling CreateRestartPoint. That maintains
875 : * the same pacing as we have during checkpoints in normal operation, but
876 : * we might exceed max_wal_size by a fair amount. That's because there can
877 : * be a large gap between a checkpoint's redo-pointer and the checkpoint
878 : * record itself, and we only start the restartpoint after we've seen the
879 : * checkpoint record. (The gap is typically up to CheckPointSegments *
880 : * checkpoint_completion_target where checkpoint_completion_target is the
881 : * value that was in effect when the WAL was generated).
882 : */
883 26416 : if (RecoveryInProgress())
884 11662 : recptr = GetXLogReplayRecPtr(NULL);
885 : else
886 14754 : recptr = GetInsertRecPtr();
887 26416 : elapsed_xlogs = (((double) (recptr - ckpt_start_recptr)) /
888 26416 : wal_segment_size) / CheckPointSegments;
889 :
890 26416 : if (progress < elapsed_xlogs)
891 : {
892 7266 : ckpt_cached_elapsed = elapsed_xlogs;
893 7266 : return false;
894 : }
895 :
896 : /*
897 : * Check progress against time elapsed and checkpoint_timeout.
898 : */
899 19150 : gettimeofday(&now, NULL);
900 19150 : elapsed_time = ((double) ((pg_time_t) now.tv_sec - ckpt_start_time) +
901 19150 : now.tv_usec / 1000000.0) / CheckPointTimeout;
902 :
903 19150 : if (progress < elapsed_time)
904 : {
905 138 : ckpt_cached_elapsed = elapsed_time;
906 138 : return false;
907 : }
908 :
909 : /* It looks like we're on schedule. */
910 19012 : return true;
911 : }
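
/*
 * Worked example (added for illustration, not part of the original file):
 * with checkpoint_completion_target = 0.9, CheckPointSegments = 32 and
 * wal_segment_size = 16MB, a checkpoint that has written half of its
 * buffers has scaled progress 0.5 * 0.9 = 0.45.  It is "on schedule", and
 * therefore naps in CheckpointWriteDelay(), only while less than
 * 0.45 * 32 * 16MB = 230.4MB of WAL has been inserted since
 * ckpt_start_recptr and less than 0.45 * CheckPointTimeout seconds have
 * elapsed since ckpt_start_time.
 */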
912 :
913 :
914 : /* --------------------------------
915 : * signal handler routines
916 : * --------------------------------
917 : */
918 :
919 : /* SIGINT: set flag to trigger writing of shutdown checkpoint */
920 : static void
921 1020 : ReqShutdownXLOG(SIGNAL_ARGS)
922 : {
923 1020 : ShutdownXLOGPending = true;
924 1020 : SetLatch(MyLatch);
925 1020 : }
926 :
927 :
928 : /* --------------------------------
929 : * communication with backends
930 : * --------------------------------
931 : */
932 :
933 : /*
934 : * CheckpointerShmemSize
935 : * Compute space needed for checkpointer-related shared memory
936 : */
937 : Size
938 6114 : CheckpointerShmemSize(void)
939 : {
940 : Size size;
941 :
942 : /*
943 : * Currently, the size of the requests[] array is arbitrarily set equal to
944 : * NBuffers. This may prove too large or small ...
945 : */
946 6114 : size = offsetof(CheckpointerShmemStruct, requests);
947 6114 : size = add_size(size, mul_size(NBuffers, sizeof(CheckpointerRequest)));
948 :
949 6114 : return size;
950 : }
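
/*
 * Worked example (added for illustration, not part of the original file):
 * with the default 8kB block size, shared_buffers = 128MB gives
 * NBuffers = 16384, so the queue holds up to 16384 requests.  At roughly
 * two dozen bytes per CheckpointerRequest (the exact size is
 * platform-dependent), that is only a few hundred kB of shared memory.
 */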
951 :
952 : /*
953 : * CheckpointerShmemInit
954 : * Allocate and initialize checkpointer-related shared memory
955 : */
956 : void
957 2140 : CheckpointerShmemInit(void)
958 : {
959 2140 : Size size = CheckpointerShmemSize();
960 : bool found;
961 :
962 2140 : CheckpointerShmem = (CheckpointerShmemStruct *)
963 2140 : ShmemInitStruct("Checkpointer Data",
964 : size,
965 : &found);
966 :
967 2140 : if (!found)
968 : {
969 : /*
970 : * First time through, so initialize. Note that we zero the whole
971 : * requests array; this is so that CompactCheckpointerRequestQueue can
972 : * assume that any pad bytes in the request structs are zeroes.
973 : */
974 2424 : MemSet(CheckpointerShmem, 0, size);
975 2140 : SpinLockInit(&CheckpointerShmem->ckpt_lck);
976 2140 : CheckpointerShmem->max_requests = NBuffers;
977 2140 : ConditionVariableInit(&CheckpointerShmem->start_cv);
978 2140 : ConditionVariableInit(&CheckpointerShmem->done_cv);
979 : }
980 2140 : }
981 :
982 : /*
983 : * ExecCheckpoint
984 : * Primary entry point for manual CHECKPOINT commands
985 : *
986 : * This is mainly a wrapper for RequestCheckpoint().
987 : */
988 : void
989 882 : ExecCheckpoint(ParseState *pstate, CheckPointStmt *stmt)
990 : {
991 882 : bool fast = true;
992 882 : bool unlogged = false;
993 :
994 1770 : foreach_ptr(DefElem, opt, stmt->options)
995 : {
996 30 : if (strcmp(opt->defname, "mode") == 0)
997 : {
998 12 : char *mode = defGetString(opt);
999 :
1000 12 : if (strcmp(mode, "spread") == 0)
1001 0 : fast = false;
1002 12 : else if (strcmp(mode, "fast") != 0)
1003 6 : ereport(ERROR,
1004 : (errcode(ERRCODE_SYNTAX_ERROR),
1005 : errmsg("unrecognized MODE option \"%s\"", mode),
1006 : parser_errposition(pstate, opt->location)));
1007 : }
1008 18 : else if (strcmp(opt->defname, "flush_unlogged") == 0)
1009 12 : unlogged = defGetBoolean(opt);
1010 : else
1011 6 : ereport(ERROR,
1012 : (errcode(ERRCODE_SYNTAX_ERROR),
1013 : errmsg("unrecognized CHECKPOINT option \"%s\"", opt->defname),
1014 : parser_errposition(pstate, opt->location)));
1015 : }
1016 :
1017 870 : if (!has_privs_of_role(GetUserId(), ROLE_PG_CHECKPOINT))
1018 0 : ereport(ERROR,
1019 : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1020 : /* translator: %s is name of an SQL command (e.g., CHECKPOINT) */
1021 : errmsg("permission denied to execute %s command",
1022 : "CHECKPOINT"),
1023 : errdetail("Only roles with privileges of the \"%s\" role may execute this command.",
1024 : "pg_checkpoint")));
1025 :
1026 1740 : RequestCheckpoint(CHECKPOINT_WAIT |
1027 870 : (fast ? CHECKPOINT_FAST : 0) |
1028 870 : (unlogged ? CHECKPOINT_FLUSH_UNLOGGED : 0) |
1029 870 : (RecoveryInProgress() ? 0 : CHECKPOINT_FORCE));
1030 870 : }
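
/*
 * Examples (added for illustration, not part of the original file) of the
 * SQL forms the option parsing above accepts:
 *
 *   CHECKPOINT;                               -- fast (immediate) by default
 *   CHECKPOINT (MODE SPREAD);                 -- throttled, spread-out writes
 *   CHECKPOINT (MODE FAST, FLUSH_UNLOGGED);   -- also flush unlogged relations
 */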
1031 :
1032 : /*
1033 : * RequestCheckpoint
1034 : * Called in backend processes to request a checkpoint
1035 : *
1036 : * flags is a bitwise OR of the following:
1037 : * CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
1038 : * CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
1039 : * CHECKPOINT_FAST: finish the checkpoint ASAP,
1040 : * ignoring checkpoint_completion_target parameter.
1041 : * CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
1042 : * since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
1043 : * CHECKPOINT_END_OF_RECOVERY, and the CHECKPOINT command).
1044 : * CHECKPOINT_WAIT: wait for completion before returning (otherwise,
1045 : * just signal checkpointer to do it, and return).
1046 : * CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling.
1047 : * (This affects logging, and in particular enables CheckPointWarning.)
1048 : */
1049 : void
1050 5322 : RequestCheckpoint(int flags)
1051 : {
1052 : int ntries;
1053 : int old_failed,
1054 : old_started;
1055 :
1056 : /*
1057 : * If in a standalone backend, just do it ourselves.
1058 : */
1059 5322 : if (!IsPostmasterEnvironment)
1060 : {
1061 : /*
1062 : * There's no point in doing slow checkpoints in a standalone backend,
1063 : * because there are no other backends the checkpoint could disrupt.
1064 : */
1065 410 : CreateCheckPoint(flags | CHECKPOINT_FAST);
1066 :
1067 : /* Free all smgr objects, as CheckpointerMain() normally would. */
1068 410 : smgrdestroyall();
1069 :
1070 410 : return;
1071 : }
1072 :
1073 : /*
1074 : * Atomically set the request flags, and take a snapshot of the counters.
1075 : * When we see ckpt_started > old_started, we know the flags we set here
1076 : * have been seen by checkpointer.
1077 : *
1078 : * Note that we OR the flags with any existing flags, to avoid overriding
1079 : * a "stronger" request by another backend. The flag senses must be
1080 : * chosen to make this work!
1081 : */
1082 4912 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
1083 :
1084 4912 : old_failed = CheckpointerShmem->ckpt_failed;
1085 4912 : old_started = CheckpointerShmem->ckpt_started;
1086 4912 : CheckpointerShmem->ckpt_flags |= (flags | CHECKPOINT_REQUESTED);
1087 :
1088 4912 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
1089 :
1090 : /*
1091 : * Set checkpointer's latch to request checkpoint. It's possible that the
1092 : * checkpointer hasn't started yet, so we will retry a few times if
1093 : * needed. (Actually, more than a few times, since on slow or overloaded
1094 : * buildfarm machines, it's been observed that the checkpointer can take
1095 : * several seconds to start.) However, if not told to wait for the
1096 : * checkpoint to occur, we consider failure to set the latch to be
1097 : * nonfatal and merely LOG it. The checkpointer should see the request
1098 : * when it does start, with or without the SetLatch().
1099 : */
1100 : #define MAX_SIGNAL_TRIES 600 /* max wait 60.0 sec */
1101 4912 : for (ntries = 0;; ntries++)
1102 32 : {
1103 4944 : volatile PROC_HDR *procglobal = ProcGlobal;
1104 4944 : ProcNumber checkpointerProc = procglobal->checkpointerProc;
1105 :
1106 4944 : if (checkpointerProc == INVALID_PROC_NUMBER)
1107 : {
1108 32 : if (ntries >= MAX_SIGNAL_TRIES || !(flags & CHECKPOINT_WAIT))
1109 : {
1110 0 : elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
1111 : "could not notify checkpoint: checkpointer is not running");
1112 0 : break;
1113 : }
1114 : }
1115 : else
1116 : {
1117 4912 : SetLatch(&GetPGProcByNumber(checkpointerProc)->procLatch);
1118 : /* notified successfully */
1119 4912 : break;
1120 : }
1121 :
1122 32 : CHECK_FOR_INTERRUPTS();
1123 32 : pg_usleep(100000L); /* wait 0.1 sec, then retry */
1124 : }
1125 :
1126 : /*
1127 : * If requested, wait for completion. We detect completion according to
1128 : * the algorithm given above.
1129 : */
1130 4912 : if (flags & CHECKPOINT_WAIT)
1131 : {
1132 : int new_started,
1133 : new_failed;
1134 :
1135 : /* Wait for a new checkpoint to start. */
1136 1594 : ConditionVariablePrepareToSleep(&CheckpointerShmem->start_cv);
1137 : for (;;)
1138 : {
1139 2948 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
1140 2948 : new_started = CheckpointerShmem->ckpt_started;
1141 2948 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
1142 :
1143 2948 : if (new_started != old_started)
1144 1594 : break;
1145 :
1146 1354 : ConditionVariableSleep(&CheckpointerShmem->start_cv,
1147 : WAIT_EVENT_CHECKPOINT_START);
1148 : }
1149 1594 : ConditionVariableCancelSleep();
1150 :
1151 : /*
1152 : * We are waiting for ckpt_done >= new_started, in a modulo sense.
1153 : */
1154 1594 : ConditionVariablePrepareToSleep(&CheckpointerShmem->done_cv);
1155 : for (;;)
1156 1380 : {
1157 : int new_done;
1158 :
1159 2974 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
1160 2974 : new_done = CheckpointerShmem->ckpt_done;
1161 2974 : new_failed = CheckpointerShmem->ckpt_failed;
1162 2974 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
1163 :
1164 2974 : if (new_done - new_started >= 0)
1165 1594 : break;
1166 :
1167 1380 : ConditionVariableSleep(&CheckpointerShmem->done_cv,
1168 : WAIT_EVENT_CHECKPOINT_DONE);
1169 : }
1170 1594 : ConditionVariableCancelSleep();
1171 :
1172 1594 : if (new_failed != old_failed)
1173 0 : ereport(ERROR,
1174 : (errmsg("checkpoint request failed"),
1175 : errhint("Consult recent messages in the server log for details.")));
1176 : }
1177 : }
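
/*
 * Illustrative sketch, not part of the original file: a typical
 * backend-side caller that wants an immediate checkpoint and wants to
 * wait for its completion, mirroring what ExecCheckpoint() does above.
 * Any failure surfaces as the "checkpoint request failed" ERROR.
 */
static void
sketch_request_fast_checkpoint(void)
{
    RequestCheckpoint(CHECKPOINT_FAST | CHECKPOINT_FORCE | CHECKPOINT_WAIT);
}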
1178 :
1179 : /*
1180 : * ForwardSyncRequest
1181 : * Forward a file-fsync request from a backend to the checkpointer
1182 : *
1183 : * Whenever a backend is compelled to write directly to a relation
1184 : * (which should be seldom, if the background writer is getting its job done),
1185 : * the backend calls this routine to pass over knowledge that the relation
1186 : * is dirty and must be fsync'd before next checkpoint. We also use this
1187 : * opportunity to count such writes for statistical purposes.
1188 : *
1189 : * To avoid holding the lock for longer than necessary, we normally write
1190 : * to the requests[] queue without checking for duplicates. The checkpointer
1191 : * will have to eliminate dups internally anyway. However, if we discover
1192 : * that the queue is full, we make a pass over the entire queue to compact
1193 : * it. This is somewhat expensive, but the alternative is for the backend
1194 : * to perform its own fsync, which is far more expensive in practice. It
1195 : * is theoretically possible a backend fsync might still be necessary, if
1196 : * the queue is full and contains no duplicate entries. In that case, we
1197 : * let the backend know by returning false.
1198 : */
1199 : bool
1200 2458956 : ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
1201 : {
1202 : CheckpointerRequest *request;
1203 : bool too_full;
1204 :
1205 2458956 : if (!IsUnderPostmaster)
1206 0 : return false; /* probably shouldn't even get here */
1207 :
1208 2458956 : if (AmCheckpointerProcess())
1209 0 : elog(ERROR, "ForwardSyncRequest must not be called in checkpointer");
1210 :
1211 2458956 : LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
1212 :
1213 : /*
1214 : * If the checkpointer isn't running or the request queue is full, the
1215 : * backend will have to perform its own fsync request. But before forcing
1216 : * that to happen, we can try to compact the request queue.
1217 : */
1218 2458956 : if (CheckpointerShmem->checkpointer_pid == 0 ||
1219 2457616 : (CheckpointerShmem->num_requests >= CheckpointerShmem->max_requests &&
1220 4260 : !CompactCheckpointerRequestQueue()))
1221 : {
1222 4908 : LWLockRelease(CheckpointerCommLock);
1223 4908 : return false;
1224 : }
1225 :
1226 : /* OK, insert request */
1227 2454048 : request = &CheckpointerShmem->requests[CheckpointerShmem->num_requests++];
1228 2454048 : request->ftag = *ftag;
1229 2454048 : request->type = type;
1230 :
1231 : /* If queue is more than half full, nudge the checkpointer to empty it */
1232 2454048 : too_full = (CheckpointerShmem->num_requests >=
1233 2454048 : CheckpointerShmem->max_requests / 2);
1234 :
1235 2454048 : LWLockRelease(CheckpointerCommLock);
1236 :
1237 : /* ... but not till after we release the lock */
1238 2454048 : if (too_full)
1239 : {
1240 62958 : volatile PROC_HDR *procglobal = ProcGlobal;
1241 62958 : ProcNumber checkpointerProc = procglobal->checkpointerProc;
1242 :
1243 62958 : if (checkpointerProc != INVALID_PROC_NUMBER)
1244 62958 : SetLatch(&GetPGProcByNumber(checkpointerProc)->procLatch);
1245 : }
1246 :
1247 2454048 : return true;
1248 : }
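
/*
 * Illustrative sketch, not part of the original file: the contract
 * described above, as seen from a backend.  The real caller is
 * RegisterSyncRequest() in src/backend/storage/sync/sync.c;
 * "perform_own_fsync" here is a hypothetical stand-in for the fallback it
 * performs when the forward fails.
 */
static void
sketch_forward_or_fsync(const FileTag *ftag)
{
    if (!ForwardSyncRequest(ftag, SYNC_REQUEST))
    {
        /*
         * Queue full (and not compactable) or checkpointer not running:
         * the backend must fsync the file itself.
         */
        perform_own_fsync(ftag);    /* hypothetical fallback */
    }
}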
1249 :
1250 : /*
1251 : * CompactCheckpointerRequestQueue
1252 : * Remove duplicates from the request queue to avoid backend fsyncs.
1253 : * Returns "true" if any entries were removed.
1254 : *
1255 : * Although a full fsync request queue is not common, it can lead to severe
1256 : * performance problems when it does happen. So far, this situation has
1257 : * only been observed to occur when the system is under heavy write load,
1258 : * and especially during the "sync" phase of a checkpoint. Without this
1259 : * logic, each backend begins doing an fsync for every block written, which
1260 : * gets very expensive and can slow down the whole system.
1261 : *
1262 : * Trying to do this every time the queue is full could lose if there
1263 : * aren't any removable entries. But that should be vanishingly rare in
1264 : * practice: there's one queue entry per shared buffer.
1265 : */
1266 : static bool
1267 4260 : CompactCheckpointerRequestQueue(void)
1268 : {
1269 : struct CheckpointerSlotMapping
1270 : {
1271 : CheckpointerRequest request;
1272 : int slot;
1273 : };
1274 :
1275 : int n,
1276 : preserve_count;
1277 4260 : int num_skipped = 0;
1278 : HASHCTL ctl;
1279 : HTAB *htab;
1280 : bool *skip_slot;
1281 :
1282 : /* must hold CheckpointerCommLock in exclusive mode */
1283 : Assert(LWLockHeldByMe(CheckpointerCommLock));
1284 :
1285 : /* Avoid memory allocations in a critical section. */
1286 4260 : if (CritSectionCount > 0)
1287 14 : return false;
1288 :
1289 : /* Initialize skip_slot array */
1290 4246 : skip_slot = palloc0(sizeof(bool) * CheckpointerShmem->num_requests);
1291 :
1292 : /* Initialize temporary hash table */
1293 4246 : ctl.keysize = sizeof(CheckpointerRequest);
1294 4246 : ctl.entrysize = sizeof(struct CheckpointerSlotMapping);
1295 4246 : ctl.hcxt = CurrentMemoryContext;
1296 :
1297 4246 : htab = hash_create("CompactCheckpointerRequestQueue",
1298 4246 : CheckpointerShmem->num_requests,
1299 : &ctl,
1300 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
1301 :
1302 : /*
1303 : * The basic idea here is that a request can be skipped if it's followed
1304 : * by a later, identical request. It might seem more sensible to work
1305 : * backwards from the end of the queue and check whether a request is
1306 : * *preceded* by an earlier, identical request, in the hopes of doing less
1307 : * copying. But that might change the semantics, if there's an
1308 : * intervening SYNC_FORGET_REQUEST or SYNC_FILTER_REQUEST, so we do it
1309 : * this way. It would be possible to be even smarter if we made the code
1310 : * below understand the specific semantics of such requests (it could blow
1311 : * away preceding entries that would end up being canceled anyhow), but
1312 : * it's not clear that the extra complexity would buy us anything.
1313 : */
1314 507638 : for (n = 0; n < CheckpointerShmem->num_requests; n++)
1315 : {
1316 : CheckpointerRequest *request;
1317 : struct CheckpointerSlotMapping *slotmap;
1318 : bool found;
1319 :
1320 : /*
1321 : * We use the request struct directly as a hashtable key. This
1322 : * assumes that any padding bytes in the structs are consistently the
1323 : * same, which should be okay because we zeroed them in
1324 : * CheckpointerShmemInit. Note also that RelFileLocator had better
1325 : * contain no pad bytes.
1326 : */
1327 503392 : request = &CheckpointerShmem->requests[n];
1328 503392 : slotmap = hash_search(htab, request, HASH_ENTER, &found);
1329 503392 : if (found)
1330 : {
1331 : /* Duplicate, so mark the previous occurrence as skippable */
1332 28816 : skip_slot[slotmap->slot] = true;
1333 28816 : num_skipped++;
1334 : }
1335 : /* Remember slot containing latest occurrence of this request value */
1336 503392 : slotmap->slot = n;
1337 : }
1338 :
1339 : /* Done with the hash table. */
1340 4246 : hash_destroy(htab);
1341 :
1342 : /* If no duplicates, we're out of luck. */
1343 4246 : if (!num_skipped)
1344 : {
1345 3554 : pfree(skip_slot);
1346 3554 : return false;
1347 : }
1348 :
1349 : /* We found some duplicates; remove them. */
1350 692 : preserve_count = 0;
1351 81652 : for (n = 0; n < CheckpointerShmem->num_requests; n++)
1352 : {
1353 80960 : if (skip_slot[n])
1354 28816 : continue;
1355 52144 : CheckpointerShmem->requests[preserve_count++] = CheckpointerShmem->requests[n];
1356 : }
1357 692 : ereport(DEBUG1,
1358 : (errmsg_internal("compacted fsync request queue from %d entries to %d entries",
1359 : CheckpointerShmem->num_requests, preserve_count)));
1360 692 : CheckpointerShmem->num_requests = preserve_count;
1361 :
1362 : /* Cleanup. */
1363 692 : pfree(skip_slot);
1364 692 : return true;
1365 : }
1366 :
1367 : /*
1368 : * AbsorbSyncRequests
1369 : * Retrieve queued sync requests and pass them to sync mechanism.
1370 : *
1371 : * This is exported because it must be called during CreateCheckPoint;
1372 : * we have to be sure we have accepted all pending requests just before
1373 : * we start fsync'ing. Since CreateCheckPoint sometimes runs in
1374 : * non-checkpointer processes, do nothing if not checkpointer.
1375 : */
1376 : void
1377 40244 : AbsorbSyncRequests(void)
1378 : {
1379 40244 : CheckpointerRequest *requests = NULL;
1380 : CheckpointerRequest *request;
1381 : int n;
1382 :
1383 40244 : if (!AmCheckpointerProcess())
1384 1264 : return;
1385 :
1386 38980 : LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
1387 :
1388 : /*
1389 : * We try to avoid holding the lock for a long time by copying the request
1390 : * array, and processing the requests after releasing the lock.
1391 : *
1392 : * Once we have cleared the requests from shared memory, we have to PANIC
1393 : * if we then fail to absorb them (eg, because our hashtable runs out of
1394 : * memory). This is because the system cannot run safely if we are unable
1395 : * to fsync what we have been told to fsync. Fortunately, the hashtable
1396 : * is so small that the problem is quite unlikely to arise in practice.
1397 : */
1398 38980 : n = CheckpointerShmem->num_requests;
1399 38980 : if (n > 0)
1400 : {
1401 19832 : requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest));
1402 19832 : memcpy(requests, CheckpointerShmem->requests, n * sizeof(CheckpointerRequest));
1403 : }
1404 :
1405 38980 : START_CRIT_SECTION();
1406 :
1407 38980 : CheckpointerShmem->num_requests = 0;
1408 :
1409 38980 : LWLockRelease(CheckpointerCommLock);
1410 :
1411 2267526 : for (request = requests; n > 0; request++, n--)
1412 2228546 : RememberSyncRequest(&request->ftag, request->type);
1413 :
1414 38980 : END_CRIT_SECTION();
1415 :
1416 38980 : if (requests)
1417 19832 : pfree(requests);
1418 : }
1419 :
1420 : /*
1421 : * Update any shared memory configurations based on config parameters
1422 : */
1423 : static void
1424 1144 : UpdateSharedMemoryConfig(void)
1425 : {
1426 : /* update global shmem state for sync rep */
1427 1144 : SyncRepUpdateSyncStandbysDefined();
1428 :
1429 : /*
1430 : * If full_page_writes has been changed by SIGHUP, we update it in shared
1431 : * memory and write an XLOG_FPW_CHANGE record.
1432 : */
1433 1144 : UpdateFullPageWrites();
1434 :
1435 1144 : elog(DEBUG2, "checkpointer updated shared memory configuration values");
1436 1144 : }
1437 :
1438 : /*
1439 : * FirstCallSinceLastCheckpoint allows a process to take an action once
1440 : * per checkpoint cycle by asynchronously checking for checkpoint completion.
1441 : */
1442 : bool
1443 25578 : FirstCallSinceLastCheckpoint(void)
1444 : {
1445 : static int ckpt_done = 0;
1446 : int new_done;
1447 25578 : bool FirstCall = false;
1448 :
1449 25578 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
1450 25578 : new_done = CheckpointerShmem->ckpt_done;
1451 25578 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
1452 :
1453 25578 : if (new_done != ckpt_done)
1454 1168 : FirstCall = true;
1455 :
1456 25578 : ckpt_done = new_done;
1457 :
1458 25578 : return FirstCall;
1459 : }
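
/*
 * Illustrative sketch, not part of the original file: how a long-lived
 * process (the bgwriter is the actual user of this interface) can take a
 * per-checkpoint-cycle action.
 */
static void
sketch_once_per_checkpoint_cycle(void)
{
    if (FirstCallSinceLastCheckpoint())
    {
        /* e.g. discard per-cycle bookkeeping (hypothetical action) */
    }
}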