/*-------------------------------------------------------------------------
 *
 * checkpointer.c
 *
 * The checkpointer is new as of Postgres 9.2. It handles all checkpoints.
 * Checkpoints are automatically dispatched after a certain amount of time has
 * elapsed since the last one, and it can be signaled to perform requested
 * checkpoints as well. (The GUC parameter that mandates a checkpoint every
 * so many WAL segments is implemented by having backends signal when they
 * fill WAL segments; the checkpointer itself doesn't watch for the
 * condition.)
 *
 * The normal termination sequence is that checkpointer is instructed to
 * execute the shutdown checkpoint by SIGINT. After that checkpointer waits
 * to be terminated via SIGUSR2, which instructs the checkpointer to exit(0).
 * All backends must be stopped before SIGINT or SIGUSR2 is issued!
 *
 * Emergency termination is by SIGQUIT; like any backend, the checkpointer
 * will simply abort and exit on SIGQUIT.
 *
 * If the checkpointer exits unexpectedly, the postmaster treats that the same
 * as a backend crash: shared memory may be corrupted, so remaining backends
 * should be killed by SIGQUIT and then a recovery cycle started. (Even if
 * shared memory isn't corrupted, we have lost information about which
 * files need to be fsync'd for the next checkpoint, and so a system
 * restart needs to be forced.)
 *
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 *
 *
 * IDENTIFICATION
 *    src/backend/postmaster/checkpointer.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <sys/time.h>
#include <time.h>

#include "access/xlog.h"
#include "access/xlog_internal.h"
#include "access/xlogrecovery.h"
#include "libpq/pqsignal.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "postmaster/auxprocess.h"
#include "postmaster/bgwriter.h"
#include "postmaster/interrupt.h"
#include "replication/syncrep.h"
#include "storage/bufmgr.h"
#include "storage/condition_variable.h"
#include "storage/fd.h"
#include "storage/ipc.h"
#include "storage/lwlock.h"
#include "storage/pmsignal.h"
#include "storage/proc.h"
#include "storage/procsignal.h"
#include "storage/shmem.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "utils/guc.h"
#include "utils/memutils.h"
#include "utils/resowner.h"


/*----------
 * Shared memory area for communication between checkpointer and backends
 *
 * The ckpt counters allow backends to watch for completion of a checkpoint
 * request they send. Here's how it works:
 *  * At start of a checkpoint, checkpointer reads (and clears) the request
 *    flags and increments ckpt_started, while holding ckpt_lck.
 *  * On completion of a checkpoint, checkpointer sets ckpt_done to
 *    equal ckpt_started.
 *  * On failure of a checkpoint, checkpointer increments ckpt_failed
 *    and sets ckpt_done to equal ckpt_started.
 *
 * The algorithm for backends is:
 *  1. Record current values of ckpt_failed and ckpt_started, and
 *     set request flags, while holding ckpt_lck.
 *  2. Send signal to request checkpoint.
 *  3. Sleep until ckpt_started changes. Now you know a checkpoint has
 *     begun since you started this algorithm (although *not* that it was
 *     specifically initiated by your signal), and that it is using your flags.
 *  4. Record new value of ckpt_started.
 *  5. Sleep until ckpt_done >= saved value of ckpt_started. (Use modulo
 *     arithmetic here in case counters wrap around.) Now you know a
 *     checkpoint has started and completed, but not whether it was
 *     successful.
 *  6. If ckpt_failed is different from the originally saved value,
 *     assume request failed; otherwise it was definitely successful.
 *
 * ckpt_flags holds the OR of the checkpoint request flags sent by all
 * requesting backends since the last checkpoint start. The flags are
 * chosen so that OR'ing is the correct way to combine multiple requests.
 *
 * The requests array holds fsync requests sent by backends and not yet
 * absorbed by the checkpointer.
 *
 * Unlike the checkpoint fields, the request-related fields are protected
 * by CheckpointerCommLock.
 *----------
 */
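/*
 * RequestCheckpoint() below implements the backend side of this protocol;
 * CheckpointerMain() implements the checkpointer side.
 */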
typedef struct
{
    SyncRequestType type;       /* request type */
    FileTag     ftag;           /* file identifier */
} CheckpointerRequest;

typedef struct
{
    pid_t       checkpointer_pid;   /* PID (0 if not started) */

    slock_t     ckpt_lck;       /* protects all the ckpt_* fields */

    int         ckpt_started;   /* advances when checkpoint starts */
    int         ckpt_done;      /* advances when checkpoint done */
    int         ckpt_failed;    /* advances when checkpoint fails */

    int         ckpt_flags;     /* checkpoint flags, as defined in xlog.h */

    ConditionVariable start_cv; /* signaled when ckpt_started advances */
    ConditionVariable done_cv;  /* signaled when ckpt_done advances */

    int         num_requests;   /* current # of requests */
    int         max_requests;   /* allocated array size */
    CheckpointerRequest requests[FLEXIBLE_ARRAY_MEMBER];
} CheckpointerShmemStruct;

static CheckpointerShmemStruct *CheckpointerShmem;

/* interval for calling AbsorbSyncRequests in CheckpointWriteDelay */
#define WRITES_PER_ABSORB       1000

/*
 * GUC parameters
 */
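/*
 * CheckPointTimeout and CheckPointWarning are in seconds;
 * CheckPointCompletionTarget is a fraction of the checkpoint interval.
 */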
int         CheckPointTimeout = 300;
int         CheckPointWarning = 30;
double      CheckPointCompletionTarget = 0.9;

/*
 * Private state
 */
static bool ckpt_active = false;
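/* set on SIGINT to request the shutdown checkpoint; see ReqShutdownXLOG() */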
static volatile sig_atomic_t ShutdownXLOGPending = false;

/* these values are valid when ckpt_active is true: */
static pg_time_t ckpt_start_time;
static XLogRecPtr ckpt_start_recptr;
static double ckpt_cached_elapsed;

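/* times (per local clock) of last checkpoint start and last xlog switch */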
static pg_time_t last_checkpoint_time;
static pg_time_t last_xlog_switch_time;

/* Prototypes for private functions */

static void HandleCheckpointerInterrupts(void);
static void CheckArchiveTimeout(void);
static bool IsCheckpointOnSchedule(double progress);
static bool ImmediateCheckpointRequested(void);
static bool CompactCheckpointerRequestQueue(void);
static void UpdateSharedMemoryConfig(void);

/* Signal handlers */
static void ReqShutdownXLOG(SIGNAL_ARGS);


/*
 * Main entry point for checkpointer process
 *
 * This is invoked from AuxiliaryProcessMain, which has already created the
 * basic execution environment, but not enabled signals yet.
 */
void
CheckpointerMain(const void *startup_data, size_t startup_data_len)
{
    sigjmp_buf  local_sigjmp_buf;
    MemoryContext checkpointer_context;

    Assert(startup_data_len == 0);

    MyBackendType = B_CHECKPOINTER;
    AuxiliaryProcessMainCommon();

    CheckpointerShmem->checkpointer_pid = MyProcPid;

    /*
     * Properly accept or ignore signals the postmaster might send us
     *
     * Note: we deliberately ignore SIGTERM, because during a standard Unix
     * system shutdown cycle, init will SIGTERM all processes at once. We
     * want to wait for the backends to exit, whereupon the postmaster will
     * tell us it's okay to shut down (via SIGUSR2).
     */
    pqsignal(SIGHUP, SignalHandlerForConfigReload);
    pqsignal(SIGINT, ReqShutdownXLOG);
    pqsignal(SIGTERM, SIG_IGN); /* ignore SIGTERM */
    /* SIGQUIT handler was already set up by InitPostmasterChild */
    pqsignal(SIGALRM, SIG_IGN);
    pqsignal(SIGPIPE, SIG_IGN);
    pqsignal(SIGUSR1, procsignal_sigusr1_handler);
    pqsignal(SIGUSR2, SignalHandlerForShutdownRequest);

    /*
     * Reset some signals that are accepted by postmaster but not here
     */
    pqsignal(SIGCHLD, SIG_DFL);

    /*
     * Initialize so that first time-driven event happens at the correct time.
     */
    last_checkpoint_time = last_xlog_switch_time = (pg_time_t) time(NULL);

    /*
     * Write out stats after shutdown. This needs to be called by exactly one
     * process during a normal shutdown, and since checkpointer is shut down
     * very late...
     *
     * While e.g. walsenders are still active after the shutdown checkpoint
     * has been written (and thus could produce more stats), checkpointer
     * stays around after that checkpoint as well. The postmaster will only
     * signal checkpointer to exit after all processes that could emit stats
     * have been shut down.
     */
    before_shmem_exit(pgstat_before_server_shutdown, 0);

    /*
     * Create a memory context that we will do all our work in. We do this so
     * that we can reset the context during error recovery and thereby avoid
     * possible memory leaks. Formerly this code just ran in
     * TopMemoryContext, but resetting that would be a really bad idea.
     */
    checkpointer_context = AllocSetContextCreate(TopMemoryContext,
                                                 "Checkpointer",
                                                 ALLOCSET_DEFAULT_SIZES);
    MemoryContextSwitchTo(checkpointer_context);

    /*
     * If an exception is encountered, processing resumes here.
     *
     * You might wonder why this isn't coded as an infinite loop around a
     * PG_TRY construct. The reason is that this is the bottom of the
     * exception stack, and so with PG_TRY there would be no exception handler
     * in force at all during the CATCH part. By leaving the outermost setjmp
     * always active, we have at least some chance of recovering from an error
     * during error recovery. (If we get into an infinite loop thereby, it
     * will soon be stopped by overflow of elog.c's internal state stack.)
     *
     * Note that we use sigsetjmp(..., 1), so that the prevailing signal mask
     * (to wit, BlockSig) will be restored when longjmp'ing to here. Thus,
     * signals other than SIGQUIT will be blocked until we complete error
     * recovery. It might seem that this policy makes the HOLD_INTERRUPTS()
     * call redundant, but it is not since InterruptPending might be set
     * already.
     */
    if (sigsetjmp(local_sigjmp_buf, 1) != 0)
    {
        /* Since not using PG_TRY, must reset error stack by hand */
        error_context_stack = NULL;

        /* Prevent interrupts while cleaning up */
        HOLD_INTERRUPTS();

        /* Report the error to the server log */
        EmitErrorReport();

        /*
         * These operations are really just a minimal subset of
         * AbortTransaction(). We don't have very many resources to worry
         * about in checkpointer, but we do have LWLocks, buffers, and temp
         * files.
         */
        LWLockReleaseAll();
        ConditionVariableCancelSleep();
        pgstat_report_wait_end();
        UnlockBuffers();
        ReleaseAuxProcessResources(false);
        AtEOXact_Buffers(false);
        AtEOXact_SMgr();
        AtEOXact_Files(false);
        AtEOXact_HashTables(false);

        /* Warn any waiting backends that the checkpoint failed. */
        if (ckpt_active)
        {
            SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
            CheckpointerShmem->ckpt_failed++;
            CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
            SpinLockRelease(&CheckpointerShmem->ckpt_lck);

            ConditionVariableBroadcast(&CheckpointerShmem->done_cv);

            ckpt_active = false;
        }

        /*
         * Now return to normal top-level context and clear ErrorContext for
         * next time.
         */
        MemoryContextSwitchTo(checkpointer_context);
        FlushErrorState();

        /* Flush any leaked data in the top-level context */
        MemoryContextReset(checkpointer_context);

        /* Now we can allow interrupts again */
        RESUME_INTERRUPTS();

        /*
         * Sleep at least 1 second after any error. A write error is likely
         * to be repeated, and we don't want to be filling the error logs as
         * fast as we can.
         */
        pg_usleep(1000000L);
    }

    /* We can now handle ereport(ERROR) */
    PG_exception_stack = &local_sigjmp_buf;

    /*
     * Unblock signals (they were blocked when the postmaster forked us)
     */
    sigprocmask(SIG_SETMASK, &UnBlockSig, NULL);

    /*
     * Ensure all shared memory values are set correctly for the config. Doing
     * this here ensures no race conditions from other concurrent updaters.
     */
    UpdateSharedMemoryConfig();

    /*
     * Advertise our proc number that backends can use to wake us up while
     * we're sleeping.
     */
    ProcGlobal->checkpointerProc = MyProcNumber;

    /*
     * Loop until we've been asked to write the shutdown checkpoint or
     * terminate.
     */
    for (;;)
    {
        bool        do_checkpoint = false;
        int         flags = 0;
        pg_time_t   now;
        int         elapsed_secs;
        int         cur_timeout;
        bool        chkpt_or_rstpt_requested = false;
        bool        chkpt_or_rstpt_timed = false;

        /* Clear any already-pending wakeups */
        ResetLatch(MyLatch);

        /*
         * Process any requests or signals received recently.
         */
        AbsorbSyncRequests();

        HandleCheckpointerInterrupts();
        if (ShutdownXLOGPending || ShutdownRequestPending)
            break;

        /*
         * Detect a pending checkpoint request by checking whether the flags
         * word in shared memory is nonzero. We shouldn't need to acquire the
         * ckpt_lck for this.
         */
        if (((volatile CheckpointerShmemStruct *) CheckpointerShmem)->ckpt_flags)
        {
            do_checkpoint = true;
            chkpt_or_rstpt_requested = true;
        }

        /*
         * Force a checkpoint if too much time has elapsed since the last one.
         * Note that we count a timed checkpoint in stats only when this
         * occurs without an external request, but we set the CAUSE_TIME flag
         * bit even if there is also an external request.
         */
        now = (pg_time_t) time(NULL);
        elapsed_secs = now - last_checkpoint_time;
        if (elapsed_secs >= CheckPointTimeout)
        {
            if (!do_checkpoint)
                chkpt_or_rstpt_timed = true;
            do_checkpoint = true;
            flags |= CHECKPOINT_CAUSE_TIME;
        }

        /*
         * Do a checkpoint if requested.
         */
        if (do_checkpoint)
        {
            bool        ckpt_performed = false;
            bool        do_restartpoint;

            /* Check if we should perform a checkpoint or a restartpoint. */
            do_restartpoint = RecoveryInProgress();

            /*
             * Atomically fetch the request flags to figure out what kind of a
             * checkpoint we should perform, and increase the started-counter
             * to acknowledge that we've started a new checkpoint.
             */
            SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
            flags |= CheckpointerShmem->ckpt_flags;
            CheckpointerShmem->ckpt_flags = 0;
            CheckpointerShmem->ckpt_started++;
            SpinLockRelease(&CheckpointerShmem->ckpt_lck);

            ConditionVariableBroadcast(&CheckpointerShmem->start_cv);

            /*
             * The end-of-recovery checkpoint is a real checkpoint that's
             * performed while we're still in recovery.
             */
            if (flags & CHECKPOINT_END_OF_RECOVERY)
                do_restartpoint = false;

            if (chkpt_or_rstpt_timed)
            {
                chkpt_or_rstpt_timed = false;
                if (do_restartpoint)
                    PendingCheckpointerStats.restartpoints_timed++;
                else
                    PendingCheckpointerStats.num_timed++;
            }

            if (chkpt_or_rstpt_requested)
            {
                chkpt_or_rstpt_requested = false;
                if (do_restartpoint)
                    PendingCheckpointerStats.restartpoints_requested++;
                else
                    PendingCheckpointerStats.num_requested++;
            }

            /*
             * We will warn if (a) too soon since last checkpoint (whatever
             * caused it) and (b) somebody set the CHECKPOINT_CAUSE_XLOG flag
             * since the last checkpoint start. Note in particular that this
             * implementation will not generate warnings caused by
             * CheckPointTimeout < CheckPointWarning.
             */
            if (!do_restartpoint &&
                (flags & CHECKPOINT_CAUSE_XLOG) &&
                elapsed_secs < CheckPointWarning)
                ereport(LOG,
                        (errmsg_plural("checkpoints are occurring too frequently (%d second apart)",
                                       "checkpoints are occurring too frequently (%d seconds apart)",
                                       elapsed_secs,
                                       elapsed_secs),
                         errhint("Consider increasing the configuration parameter \"%s\".", "max_wal_size")));

            /*
             * Initialize checkpointer-private variables used during
             * checkpoint.
             */
            ckpt_active = true;
            if (do_restartpoint)
                ckpt_start_recptr = GetXLogReplayRecPtr(NULL);
            else
                ckpt_start_recptr = GetInsertRecPtr();
            ckpt_start_time = now;
            ckpt_cached_elapsed = 0;

            /*
             * Do the checkpoint.
             */
            if (!do_restartpoint)
                ckpt_performed = CreateCheckPoint(flags);
            else
                ckpt_performed = CreateRestartPoint(flags);

            /*
             * After any checkpoint, free all smgr objects. Otherwise we
             * would never do so for dropped relations, as the checkpointer
             * does not process shared invalidation messages or call
             * AtEOXact_SMgr().
             */
            smgrdestroyall();

            /*
             * Indicate checkpoint completion to any waiting backends.
             */
            SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
            CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
            SpinLockRelease(&CheckpointerShmem->ckpt_lck);

            ConditionVariableBroadcast(&CheckpointerShmem->done_cv);

            if (!do_restartpoint)
            {
                /*
                 * Note that we record the checkpoint start time, not the end
                 * time, as last_checkpoint_time. This is so that time-driven
                 * checkpoints happen at a predictable spacing.
                 */
                last_checkpoint_time = now;

                if (ckpt_performed)
                    PendingCheckpointerStats.num_performed++;
            }
            else
            {
                if (ckpt_performed)
                {
                    /*
                     * The same as for checkpoint. Please see the
                     * corresponding comment.
                     */
                    last_checkpoint_time = now;

                    PendingCheckpointerStats.restartpoints_performed++;
                }
                else
                {
                    /*
                     * We were not able to perform the restartpoint
                     * (checkpoints throw an ERROR in case of error), most
                     * likely because we have not received any new checkpoint
                     * WAL records since the last restartpoint. Try again in
                     * 15 seconds.
                     */
                    last_checkpoint_time = now - CheckPointTimeout + 15;
                }
            }

            ckpt_active = false;

            /*
             * We may have received an interrupt during the checkpoint and the
             * latch might have been reset (e.g. in CheckpointWriteDelay).
             */
            HandleCheckpointerInterrupts();
            if (ShutdownXLOGPending || ShutdownRequestPending)
                break;
        }

        /* Check for archive_timeout and switch xlog files if necessary. */
        CheckArchiveTimeout();

        /* Report pending statistics to the cumulative stats system */
        pgstat_report_checkpointer();
        pgstat_report_wal(true);

        /*
         * If any checkpoint flags have been set, redo the loop to handle the
         * checkpoint without sleeping.
         */
        if (((volatile CheckpointerShmemStruct *) CheckpointerShmem)->ckpt_flags)
            continue;

        /*
         * Sleep until we are signaled or it's time for another checkpoint or
         * xlog file switch.
         */
        now = (pg_time_t) time(NULL);
        elapsed_secs = now - last_checkpoint_time;
        if (elapsed_secs >= CheckPointTimeout)
            continue;           /* no sleep for us ... */
        cur_timeout = CheckPointTimeout - elapsed_secs;
        if (XLogArchiveTimeout > 0 && !RecoveryInProgress())
        {
            elapsed_secs = now - last_xlog_switch_time;
            if (elapsed_secs >= XLogArchiveTimeout)
                continue;       /* no sleep for us ... */
            cur_timeout = Min(cur_timeout, XLogArchiveTimeout - elapsed_secs);
        }

        (void) WaitLatch(MyLatch,
                         WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
                         cur_timeout * 1000L /* convert to ms */ ,
                         WAIT_EVENT_CHECKPOINTER_MAIN);
    }

    /*
     * From here on, elog(ERROR) should end with exit(1), not send control
     * back to the sigsetjmp block above.
     */
    ExitOnAnyError = true;

    if (ShutdownXLOGPending)
    {
        /*
         * Close down the database.
         *
         * Since ShutdownXLOG() creates a restartpoint or checkpoint, and
         * updates the statistics, increment the checkpoint request counter
         * and flush out the pending statistics.
         */
        PendingCheckpointerStats.num_requested++;
        ShutdownXLOG(0, 0);
        pgstat_report_checkpointer();
        pgstat_report_wal(true);

        /*
         * Tell postmaster that we're done.
         */
        SendPostmasterSignal(PMSIGNAL_XLOG_IS_SHUTDOWN);
        ShutdownXLOGPending = false;
    }

    /*
     * Wait until we're asked to shut down. By separating the writing of the
     * shutdown checkpoint from checkpointer exiting, checkpointer can perform
     * some should-be-as-late-as-possible work like writing out stats.
     */
    for (;;)
    {
        /* Clear any already-pending wakeups */
        ResetLatch(MyLatch);

        HandleCheckpointerInterrupts();

        if (ShutdownRequestPending)
            break;

        (void) WaitLatch(MyLatch,
                         WL_LATCH_SET | WL_EXIT_ON_PM_DEATH,
                         0,
                         WAIT_EVENT_CHECKPOINTER_SHUTDOWN);
    }

    /* Normal exit from the checkpointer is here */
    proc_exit(0);               /* done */
}

/*
 * Process any new interrupts.
 */
static void
HandleCheckpointerInterrupts(void)
{
    if (ProcSignalBarrierPending)
        ProcessProcSignalBarrier();

    if (ConfigReloadPending)
    {
        ConfigReloadPending = false;
        ProcessConfigFile(PGC_SIGHUP);

        /*
         * Checkpointer is the last process to shut down, so we ask it to
         * hold the keys for a range of other tasks, most of which have
         * nothing to do with checkpointing at all.
         *
         * For various reasons, some config values can change dynamically so
         * the primary copy of them is held in shared memory to make sure all
         * backends see the same value. We make Checkpointer responsible for
         * updating the shared memory copy if the parameter setting changes
         * because of SIGHUP.
         */
        UpdateSharedMemoryConfig();
    }

    /* Perform logging of memory contexts of this process */
    if (LogMemoryContextPending)
        ProcessLogMemoryContextInterrupt();
}

/*
 * CheckArchiveTimeout -- check for archive_timeout and switch xlog files
 *
 * This will switch to a new WAL file and force an archive file write if
 * meaningful activity is recorded in the current WAL file. This includes most
 * writes, including just a single checkpoint record, but excludes WAL records
 * that were inserted with the XLOG_MARK_UNIMPORTANT flag set (like
 * snapshots of running transactions). Such records, depending on
 * configuration, occur at regular intervals and don't contain important
 * information. This avoids generating archives containing only a few
 * unimportant records.
 */
static void
CheckArchiveTimeout(void)
{
    pg_time_t   now;
    pg_time_t   last_time;
    XLogRecPtr  last_switch_lsn;

    if (XLogArchiveTimeout <= 0 || RecoveryInProgress())
        return;

    now = (pg_time_t) time(NULL);

    /* First we do a quick check using possibly-stale local state. */
    if ((int) (now - last_xlog_switch_time) < XLogArchiveTimeout)
        return;

    /*
     * Update local state ... note that last_xlog_switch_time is the last time
     * a switch was performed *or requested*.
     */
    last_time = GetLastSegSwitchData(&last_switch_lsn);

    last_xlog_switch_time = Max(last_xlog_switch_time, last_time);

    /* Now we can do the real checks */
    if ((int) (now - last_xlog_switch_time) >= XLogArchiveTimeout)
    {
        /*
         * Switch segment only when "important" WAL has been logged since the
         * last segment switch (last_switch_lsn points to the end of the
         * segment the last switch occurred in).
         */
        if (GetLastImportantRecPtr() > last_switch_lsn)
        {
            XLogRecPtr  switchpoint;

            /* mark switch as unimportant, avoids triggering checkpoints */
            switchpoint = RequestXLogSwitch(true);

            /*
             * If the returned pointer points exactly to a segment boundary,
             * assume nothing happened.
             */
            if (XLogSegmentOffset(switchpoint, wal_segment_size) != 0)
                elog(DEBUG1, "write-ahead log switch forced (\"archive_timeout\"=%d)",
                     XLogArchiveTimeout);
        }

        /*
         * Update state in any case, so we don't retry constantly when the
         * system is idle.
         */
        last_xlog_switch_time = now;
    }
}

/*
 * Returns true if an immediate checkpoint request is pending. (Note that
 * this does not check the *current* checkpoint's IMMEDIATE flag, but whether
 * there is one pending behind it.)
 */
static bool
ImmediateCheckpointRequested(void)
{
    volatile CheckpointerShmemStruct *cps = CheckpointerShmem;

    /*
     * We don't need to acquire the ckpt_lck in this case because we're only
     * looking at a single flag bit.
     */
    if (cps->ckpt_flags & CHECKPOINT_IMMEDIATE)
        return true;
    return false;
}

/*
 * CheckpointWriteDelay -- control rate of checkpoint
 *
 * This function is called after each page write performed by BufferSync().
 * It is responsible for throttling BufferSync()'s write rate to hit
 * checkpoint_completion_target.
 *
 * The checkpoint request flags should be passed in; currently the only one
 * examined is CHECKPOINT_IMMEDIATE, which disables delays between writes.
 *
 * 'progress' is an estimate of how much of the work has been done, as a
 * fraction between 0.0 meaning none, and 1.0 meaning all done.
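 *
 * For example, with checkpoint_timeout = 300s and a
 * checkpoint_completion_target of 0.9, the pacing below aims to complete
 * the write phase of a time-driven checkpoint in roughly 270 seconds.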
 */
void
CheckpointWriteDelay(int flags, double progress)
{
    static int  absorb_counter = WRITES_PER_ABSORB;

    /* Do nothing if checkpoint is being executed by non-checkpointer process */
    if (!AmCheckpointerProcess())
        return;

    /*
     * Perform the usual duties and take a nap, unless we're behind schedule,
     * in which case we just try to catch up as quickly as possible.
     */
    if (!(flags & CHECKPOINT_IMMEDIATE) &&
        !ShutdownXLOGPending &&
        !ShutdownRequestPending &&
        !ImmediateCheckpointRequested() &&
        IsCheckpointOnSchedule(progress))
    {
        if (ConfigReloadPending)
        {
            ConfigReloadPending = false;
            ProcessConfigFile(PGC_SIGHUP);
            /* update shmem copies of config variables */
            UpdateSharedMemoryConfig();
        }

        AbsorbSyncRequests();
        absorb_counter = WRITES_PER_ABSORB;

        CheckArchiveTimeout();

        /* Report interim statistics to the cumulative stats system */
        pgstat_report_checkpointer();

        /*
         * This sleep used to be connected to bgwriter_delay, typically 200ms.
         * That resulted in more frequent wakeups even when there was not much
         * work to do. Checkpointer and bgwriter are no longer related, so
         * take the Big Sleep.
         */
        WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH | WL_TIMEOUT,
                  100,
                  WAIT_EVENT_CHECKPOINT_WRITE_DELAY);
        ResetLatch(MyLatch);
    }
    else if (--absorb_counter <= 0)
    {
        /*
         * Absorb pending fsync requests after each WRITES_PER_ABSORB write
         * operations even when we don't sleep, to prevent overflow of the
         * fsync request queue.
         */
        AbsorbSyncRequests();
        absorb_counter = WRITES_PER_ABSORB;
    }

    /* Check for barrier events. */
    if (ProcSignalBarrierPending)
        ProcessProcSignalBarrier();
}

/*
 * IsCheckpointOnSchedule -- are we on schedule to finish this checkpoint
 *      (or restartpoint) in time?
 *
 * Compares the current progress against the time/segments elapsed since last
 * checkpoint, and returns true if the progress we've made this far is greater
 * than the elapsed time/segments.
 */
static bool
IsCheckpointOnSchedule(double progress)
{
    XLogRecPtr  recptr;
    struct timeval now;
    double      elapsed_xlogs,
                elapsed_time;

    Assert(ckpt_active);

    /* Scale progress according to checkpoint_completion_target. */
    progress *= CheckPointCompletionTarget;

    /*
     * Check against the cached value first. Only do the more expensive
     * calculations once we reach the target previously calculated. Since
     * neither time nor the WAL insert pointer moves backwards, a freshly
     * calculated value can only be greater than or equal to the cached value.
     */
    if (progress < ckpt_cached_elapsed)
        return false;

    /*
     * Check progress against WAL segments written and CheckPointSegments.
     *
     * We compare the current WAL insert location against the location
     * computed before calling CreateCheckPoint. The code in XLogInsert that
     * actually triggers a checkpoint when CheckPointSegments is exceeded
     * compares against RedoRecPtr, so this is not completely accurate.
     * However, it's good enough for our purposes, we're only calculating an
     * estimate anyway.
     *
     * During recovery, we compare last replayed WAL record's location with
     * the location computed before calling CreateRestartPoint. That maintains
     * the same pacing as we have during checkpoints in normal operation, but
     * we might exceed max_wal_size by a fair amount. That's because there can
     * be a large gap between a checkpoint's redo-pointer and the checkpoint
     * record itself, and we only start the restartpoint after we've seen the
     * checkpoint record. (The gap is typically up to CheckPointSegments *
     * checkpoint_completion_target where checkpoint_completion_target is the
     * value that was in effect when the WAL was generated).
     */
    if (RecoveryInProgress())
        recptr = GetXLogReplayRecPtr(NULL);
    else
        recptr = GetInsertRecPtr();
    elapsed_xlogs = (((double) (recptr - ckpt_start_recptr)) /
                     wal_segment_size) / CheckPointSegments;
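    /*
     * For example, assuming 16 MB WAL segments and CheckPointSegments = 32:
     * once 8 segments' worth of WAL has been inserted since the checkpoint
     * started, elapsed_xlogs = 8 / 32 = 0.25, i.e. a quarter of the WAL
     * budget has been consumed.
     */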

    if (progress < elapsed_xlogs)
    {
        ckpt_cached_elapsed = elapsed_xlogs;
        return false;
    }

    /*
     * Check progress against time elapsed and checkpoint_timeout.
     */
    gettimeofday(&now, NULL);
    elapsed_time = ((double) ((pg_time_t) now.tv_sec - ckpt_start_time) +
                    now.tv_usec / 1000000.0) / CheckPointTimeout;

    if (progress < elapsed_time)
    {
        ckpt_cached_elapsed = elapsed_time;
        return false;
    }

    /* It looks like we're on schedule. */
    return true;
}


/* --------------------------------
 *      signal handler routines
 * --------------------------------
 */

/* SIGINT: set flag to trigger writing of shutdown checkpoint */
static void
ReqShutdownXLOG(SIGNAL_ARGS)
{
    ShutdownXLOGPending = true;
    SetLatch(MyLatch);
}


/* --------------------------------
 *      communication with backends
 * --------------------------------
 */

/*
 * CheckpointerShmemSize
 *      Compute space needed for checkpointer-related shared memory
 */
Size
CheckpointerShmemSize(void)
{
    Size        size;

    /*
     * Currently, the size of the requests[] array is arbitrarily set equal to
     * NBuffers. This may prove too large or small ...
     */
    size = offsetof(CheckpointerShmemStruct, requests);
    size = add_size(size, mul_size(NBuffers, sizeof(CheckpointerRequest)));

    return size;
}

/*
 * CheckpointerShmemInit
 *      Allocate and initialize checkpointer-related shared memory
 */
void
CheckpointerShmemInit(void)
{
    Size        size = CheckpointerShmemSize();
    bool        found;

    CheckpointerShmem = (CheckpointerShmemStruct *)
        ShmemInitStruct("Checkpointer Data",
                        size,
                        &found);

    if (!found)
    {
        /*
         * First time through, so initialize. Note that we zero the whole
         * requests array; this is so that CompactCheckpointerRequestQueue can
         * assume that any pad bytes in the request structs are zeroes.
         */
        MemSet(CheckpointerShmem, 0, size);
        SpinLockInit(&CheckpointerShmem->ckpt_lck);
        CheckpointerShmem->max_requests = NBuffers;
        ConditionVariableInit(&CheckpointerShmem->start_cv);
        ConditionVariableInit(&CheckpointerShmem->done_cv);
    }
}

/*
 * RequestCheckpoint
 *      Called in backend processes to request a checkpoint
 *
 * flags is a bitwise OR of the following:
 *  CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
 *  CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
 *  CHECKPOINT_IMMEDIATE: finish the checkpoint ASAP,
 *      ignoring checkpoint_completion_target parameter.
 *  CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
 *      since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
 *      CHECKPOINT_END_OF_RECOVERY).
 *  CHECKPOINT_WAIT: wait for completion before returning (otherwise,
 *      just signal checkpointer to do it, and return).
 *  CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling.
 *      (This affects logging, and in particular enables CheckPointWarning.)
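 *
 * For example, a user-issued CHECKPOINT command typically arrives here as
 * CHECKPOINT_IMMEDIATE | CHECKPOINT_WAIT, plus CHECKPOINT_FORCE when not in
 * recovery.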
 */
void
RequestCheckpoint(int flags)
{
    int         ntries;
    int         old_failed,
                old_started;

    /*
     * If in a standalone backend, just do it ourselves.
     */
    if (!IsPostmasterEnvironment)
    {
        /*
         * There's no point in doing slow checkpoints in a standalone backend,
         * because there are no other backends the checkpoint could disrupt.
         */
        CreateCheckPoint(flags | CHECKPOINT_IMMEDIATE);

        /* Free all smgr objects, as CheckpointerMain() normally would. */
        smgrdestroyall();

        return;
    }

    /*
     * Atomically set the request flags, and take a snapshot of the counters.
     * When we see ckpt_started > old_started, we know the flags we set here
     * have been seen by checkpointer.
     *
     * Note that we OR the flags with any existing flags, to avoid overriding
     * a "stronger" request by another backend. The flag senses must be
     * chosen to make this work!
     */
    SpinLockAcquire(&CheckpointerShmem->ckpt_lck);

    old_failed = CheckpointerShmem->ckpt_failed;
    old_started = CheckpointerShmem->ckpt_started;
    CheckpointerShmem->ckpt_flags |= (flags | CHECKPOINT_REQUESTED);

    SpinLockRelease(&CheckpointerShmem->ckpt_lck);

    /*
     * Set checkpointer's latch to request checkpoint. It's possible that the
     * checkpointer hasn't started yet, so we will retry a few times if
     * needed. (Actually, more than a few times, since on slow or overloaded
     * buildfarm machines, it's been observed that the checkpointer can take
     * several seconds to start.) However, if not told to wait for the
     * checkpoint to occur, we consider failure to set the latch to be
     * nonfatal and merely LOG it. The checkpointer should see the request
     * when it does start, with or without the SetLatch().
     */
#define MAX_SIGNAL_TRIES 600    /* max wait 60.0 sec */
    for (ntries = 0;; ntries++)
    {
        volatile PROC_HDR *procglobal = ProcGlobal;
        ProcNumber  checkpointerProc = procglobal->checkpointerProc;

        if (checkpointerProc == INVALID_PROC_NUMBER)
        {
            if (ntries >= MAX_SIGNAL_TRIES || !(flags & CHECKPOINT_WAIT))
            {
                elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
                     "could not notify checkpoint: checkpointer is not running");
                break;
            }
        }
        else
        {
            SetLatch(&GetPGProcByNumber(checkpointerProc)->procLatch);
            /* notified successfully */
            break;
        }

        CHECK_FOR_INTERRUPTS();
        pg_usleep(100000L);     /* wait 0.1 sec, then retry */
    }

    /*
     * If requested, wait for completion. We detect completion according to
     * the algorithm given above.
     */
    if (flags & CHECKPOINT_WAIT)
    {
        int         new_started,
                    new_failed;

        /* Wait for a new checkpoint to start. */
        ConditionVariablePrepareToSleep(&CheckpointerShmem->start_cv);
        for (;;)
        {
            SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
            new_started = CheckpointerShmem->ckpt_started;
            SpinLockRelease(&CheckpointerShmem->ckpt_lck);

            if (new_started != old_started)
                break;

            ConditionVariableSleep(&CheckpointerShmem->start_cv,
                                   WAIT_EVENT_CHECKPOINT_START);
        }
        ConditionVariableCancelSleep();

        /*
         * We are waiting for ckpt_done >= new_started, in a modulo sense.
         */
        ConditionVariablePrepareToSleep(&CheckpointerShmem->done_cv);
        for (;;)
        {
            int         new_done;

            SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
            new_done = CheckpointerShmem->ckpt_done;
            new_failed = CheckpointerShmem->ckpt_failed;
            SpinLockRelease(&CheckpointerShmem->ckpt_lck);

            if (new_done - new_started >= 0)
                break;

            ConditionVariableSleep(&CheckpointerShmem->done_cv,
                                   WAIT_EVENT_CHECKPOINT_DONE);
        }
        ConditionVariableCancelSleep();

        if (new_failed != old_failed)
            ereport(ERROR,
                    (errmsg("checkpoint request failed"),
                     errhint("Consult recent messages in the server log for details.")));
    }
}

/*
 * ForwardSyncRequest
 *      Forward a file-fsync request from a backend to the checkpointer
 *
 * Whenever a backend is compelled to write directly to a relation
 * (which should be seldom, if the background writer is getting its job done),
 * the backend calls this routine to pass over knowledge that the relation
 * is dirty and must be fsync'd before next checkpoint. We also use this
 * opportunity to count such writes for statistical purposes.
 *
 * To avoid holding the lock for longer than necessary, we normally write
 * to the requests[] queue without checking for duplicates. The checkpointer
 * will have to eliminate dups internally anyway. However, if we discover
 * that the queue is full, we make a pass over the entire queue to compact
 * it. This is somewhat expensive, but the alternative is for the backend
 * to perform its own fsync, which is far more expensive in practice. It
 * is theoretically possible a backend fsync might still be necessary, if
 * the queue is full and contains no duplicate entries. In that case, we
 * let the backend know by returning false.
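 * (The caller, RegisterSyncRequest() in sync.c, handles that false return.)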
 */
bool
ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
{
    CheckpointerRequest *request;
    bool        too_full;

    if (!IsUnderPostmaster)
        return false;           /* probably shouldn't even get here */

    if (AmCheckpointerProcess())
        elog(ERROR, "ForwardSyncRequest must not be called in checkpointer");

    LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);

    /*
     * If the checkpointer isn't running or the request queue is full, the
     * backend will have to perform its own fsync request. But before forcing
     * that to happen, we can try to compact the request queue.
     */
    if (CheckpointerShmem->checkpointer_pid == 0 ||
        (CheckpointerShmem->num_requests >= CheckpointerShmem->max_requests &&
         !CompactCheckpointerRequestQueue()))
    {
        LWLockRelease(CheckpointerCommLock);
        return false;
    }

    /* OK, insert request */
    request = &CheckpointerShmem->requests[CheckpointerShmem->num_requests++];
    request->ftag = *ftag;
    request->type = type;

    /* If queue is more than half full, nudge the checkpointer to empty it */
    too_full = (CheckpointerShmem->num_requests >=
                CheckpointerShmem->max_requests / 2);

    LWLockRelease(CheckpointerCommLock);

    /* ... but not till after we release the lock */
    if (too_full)
    {
        volatile PROC_HDR *procglobal = ProcGlobal;
        ProcNumber  checkpointerProc = procglobal->checkpointerProc;

        if (checkpointerProc != INVALID_PROC_NUMBER)
            SetLatch(&GetPGProcByNumber(checkpointerProc)->procLatch);
    }

    return true;
}

/*
 * CompactCheckpointerRequestQueue
 *      Remove duplicates from the request queue to avoid backend fsyncs.
 *      Returns "true" if any entries were removed.
 *
 * Although a full fsync request queue is not common, it can lead to severe
 * performance problems when it does happen. So far, this situation has
 * only been observed to occur when the system is under heavy write load,
 * and especially during the "sync" phase of a checkpoint. Without this
 * logic, each backend begins doing an fsync for every block written, which
 * gets very expensive and can slow down the whole system.
 *
 * Trying to do this every time the queue is full could lose if there
 * aren't any removable entries. But that should be vanishingly rare in
 * practice: there's one queue entry per shared buffer.
 */
static bool
CompactCheckpointerRequestQueue(void)
{
    struct CheckpointerSlotMapping
    {
        CheckpointerRequest request;
        int         slot;
    };

    int         n,
                preserve_count;
    int         num_skipped = 0;
    HASHCTL     ctl;
    HTAB       *htab;
    bool       *skip_slot;

    /* must hold CheckpointerCommLock in exclusive mode */
    Assert(LWLockHeldByMe(CheckpointerCommLock));

    /* Avoid memory allocations in a critical section. */
    if (CritSectionCount > 0)
        return false;

    /* Initialize skip_slot array */
    skip_slot = palloc0(sizeof(bool) * CheckpointerShmem->num_requests);

    /* Initialize temporary hash table */
    ctl.keysize = sizeof(CheckpointerRequest);
    ctl.entrysize = sizeof(struct CheckpointerSlotMapping);
    ctl.hcxt = CurrentMemoryContext;

    htab = hash_create("CompactCheckpointerRequestQueue",
                       CheckpointerShmem->num_requests,
                       &ctl,
                       HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

    /*
     * The basic idea here is that a request can be skipped if it's followed
     * by a later, identical request. It might seem more sensible to work
     * backwards from the end of the queue and check whether a request is
     * *preceded* by an earlier, identical request, in the hopes of doing less
     * copying. But that might change the semantics, if there's an
     * intervening SYNC_FORGET_REQUEST or SYNC_FILTER_REQUEST, so we do it
     * this way. It would be possible to be even smarter if we made the code
     * below understand the specific semantics of such requests (it could blow
     * away preceding entries that would end up being canceled anyhow), but
     * it's not clear that the extra complexity would buy us anything.
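     *
     * For example, a queue containing requests A, B, A (oldest first)
     * compacts to B, A: the earlier A is skippable because an identical
     * request follows it.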
     */
    for (n = 0; n < CheckpointerShmem->num_requests; n++)
    {
        CheckpointerRequest *request;
        struct CheckpointerSlotMapping *slotmap;
        bool        found;

        /*
         * We use the request struct directly as a hashtable key. This
         * assumes that any padding bytes in the structs are consistently the
         * same, which should be okay because we zeroed them in
         * CheckpointerShmemInit. Note also that RelFileLocator had better
         * contain no pad bytes.
         */
        request = &CheckpointerShmem->requests[n];
        slotmap = hash_search(htab, request, HASH_ENTER, &found);
        if (found)
        {
            /* Duplicate, so mark the previous occurrence as skippable */
            skip_slot[slotmap->slot] = true;
            num_skipped++;
        }
        /* Remember slot containing latest occurrence of this request value */
        slotmap->slot = n;
    }

    /* Done with the hash table. */
    hash_destroy(htab);

    /* If no duplicates, we're out of luck. */
    if (!num_skipped)
    {
        pfree(skip_slot);
        return false;
    }

    /* We found some duplicates; remove them. */
    preserve_count = 0;
    for (n = 0; n < CheckpointerShmem->num_requests; n++)
    {
        if (skip_slot[n])
            continue;
        CheckpointerShmem->requests[preserve_count++] = CheckpointerShmem->requests[n];
    }
    ereport(DEBUG1,
            (errmsg_internal("compacted fsync request queue from %d entries to %d entries",
                             CheckpointerShmem->num_requests, preserve_count)));
    CheckpointerShmem->num_requests = preserve_count;

    /* Cleanup. */
    pfree(skip_slot);
    return true;
}

/*
 * AbsorbSyncRequests
 *      Retrieve queued sync requests and pass them to sync mechanism.
 *
 * This is exported because it must be called during CreateCheckPoint;
 * we have to be sure we have accepted all pending requests just before
 * we start fsync'ing. Since CreateCheckPoint sometimes runs in
 * non-checkpointer processes, do nothing if not checkpointer.
 */
void
AbsorbSyncRequests(void)
{
    CheckpointerRequest *requests = NULL;
    CheckpointerRequest *request;
    int         n;

    if (!AmCheckpointerProcess())
        return;

    LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);

    /*
     * We try to avoid holding the lock for a long time by copying the request
     * array, and processing the requests after releasing the lock.
     *
     * Once we have cleared the requests from shared memory, we have to PANIC
     * if we then fail to absorb them (eg, because our hashtable runs out of
     * memory). This is because the system cannot run safely if we are unable
     * to fsync what we have been told to fsync. Fortunately, the hashtable
     * is so small that the problem is quite unlikely to arise in practice.
     */
    n = CheckpointerShmem->num_requests;
    if (n > 0)
    {
        requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest));
        memcpy(requests, CheckpointerShmem->requests, n * sizeof(CheckpointerRequest));
    }

    START_CRIT_SECTION();

    CheckpointerShmem->num_requests = 0;

    LWLockRelease(CheckpointerCommLock);

    for (request = requests; n > 0; request++, n--)
        RememberSyncRequest(&request->ftag, request->type);

    END_CRIT_SECTION();

    if (requests)
        pfree(requests);
}

/*
 * Update any shared memory configurations based on config parameters
 */
static void
UpdateSharedMemoryConfig(void)
{
    /* update global shmem state for sync rep */
    SyncRepUpdateSyncStandbysDefined();

    /*
     * If full_page_writes has been changed by SIGHUP, we update it in shared
     * memory and write an XLOG_FPW_CHANGE record.
     */
    UpdateFullPageWrites();

    elog(DEBUG2, "checkpointer updated shared memory configuration values");
}

/*
 * FirstCallSinceLastCheckpoint allows a process to take an action once
 * per checkpoint cycle by asynchronously checking for checkpoint completion.
1387 : bool
1388 18370 : FirstCallSinceLastCheckpoint(void)
1389 : {
1390 : static int ckpt_done = 0;
1391 : int new_done;
1392 18370 : bool FirstCall = false;
1393 :
1394 18370 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
1395 18370 : new_done = CheckpointerShmem->ckpt_done;
1396 18370 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
1397 :
1398 18370 : if (new_done != ckpt_done)
1399 948 : FirstCall = true;
1400 :
1401 18370 : ckpt_done = new_done;
1402 :
1403 18370 : return FirstCall;
1404 : }
|