LCOV - code coverage report
Current view: top level - src/backend/postmaster - checkpointer.c (source / functions) Coverage Total Hit
Test: PostgreSQL 19devel Lines: 86.9 % 406 353
Test Date: 2026-03-21 14:16:16 Functions: 100.0 % 17 17
Legend: Lines:     hit not hit

            Line data    Source code
       1              : /*-------------------------------------------------------------------------
       2              :  *
       3              :  * checkpointer.c
       4              :  *
       5              :  * The checkpointer is new as of Postgres 9.2.  It handles all checkpoints.
       6              :  * Checkpoints are automatically dispatched after a certain amount of time has
       7              :  * elapsed since the last one, and it can be signaled to perform requested
       8              :  * checkpoints as well.  (The GUC parameter that mandates a checkpoint every
       9              :  * so many WAL segments is implemented by having backends signal when they
      10              :  * fill WAL segments; the checkpointer itself doesn't watch for the
      11              :  * condition.)
      12              :  *
      13              :  * The normal termination sequence is that checkpointer is instructed to
      14              :  * execute the shutdown checkpoint by SIGINT.  After that checkpointer waits
      15              :  * to be terminated via SIGUSR2, which instructs the checkpointer to exit(0).
      16              :  * All backends must be stopped before SIGINT or SIGUSR2 is issued!
      17              :  *
      18              :  * Emergency termination is by SIGQUIT; like any backend, the checkpointer
      19              :  * will simply abort and exit on SIGQUIT.
      20              :  *
      21              :  * If the checkpointer exits unexpectedly, the postmaster treats that the same
      22              :  * as a backend crash: shared memory may be corrupted, so remaining backends
      23              :  * should be killed by SIGQUIT and then a recovery cycle started.  (Even if
      24              :  * shared memory isn't corrupted, we have lost information about which
      25              :  * files need to be fsync'd for the next checkpoint, and so a system
      26              :  * restart needs to be forced.)
      27              :  *
      28              :  *
      29              :  * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
      30              :  *
      31              :  *
      32              :  * IDENTIFICATION
      33              :  *    src/backend/postmaster/checkpointer.c
      34              :  *
      35              :  *-------------------------------------------------------------------------
      36              :  */
      37              : #include "postgres.h"
      38              : 
      39              : #include <sys/time.h>
      40              : #include <time.h>
      41              : 
      42              : #include "access/xlog.h"
      43              : #include "access/xlog_internal.h"
      44              : #include "access/xlogrecovery.h"
      45              : #include "catalog/pg_authid.h"
      46              : #include "commands/defrem.h"
      47              : #include "libpq/pqsignal.h"
      48              : #include "miscadmin.h"
      49              : #include "pgstat.h"
      50              : #include "postmaster/auxprocess.h"
      51              : #include "postmaster/bgwriter.h"
      52              : #include "postmaster/interrupt.h"
      53              : #include "replication/syncrep.h"
      54              : #include "storage/aio_subsys.h"
      55              : #include "storage/bufmgr.h"
      56              : #include "storage/condition_variable.h"
      57              : #include "storage/fd.h"
      58              : #include "storage/ipc.h"
      59              : #include "storage/lwlock.h"
      60              : #include "storage/pmsignal.h"
      61              : #include "storage/proc.h"
      62              : #include "storage/procsignal.h"
      63              : #include "storage/shmem.h"
      64              : #include "storage/smgr.h"
      65              : #include "storage/spin.h"
      66              : #include "utils/acl.h"
      67              : #include "utils/guc.h"
      68              : #include "utils/memutils.h"
      69              : #include "utils/resowner.h"
      70              : #include "utils/wait_event.h"
      71              : 
      72              : 
      73              : /*----------
      74              :  * Shared memory area for communication between checkpointer and backends
      75              :  *
      76              :  * The ckpt counters allow backends to watch for completion of a checkpoint
      77              :  * request they send.  Here's how it works:
      78              :  *  * At start of a checkpoint, checkpointer reads (and clears) the request
      79              :  *    flags and increments ckpt_started, while holding ckpt_lck.
      80              :  *  * On completion of a checkpoint, checkpointer sets ckpt_done to
      81              :  *    equal ckpt_started.
      82              :  *  * On failure of a checkpoint, checkpointer increments ckpt_failed
      83              :  *    and sets ckpt_done to equal ckpt_started.
      84              :  *
      85              :  * The algorithm for backends is:
      86              :  *  1. Record current values of ckpt_failed and ckpt_started, and
      87              :  *     set request flags, while holding ckpt_lck.
      88              :  *  2. Send signal to request checkpoint.
      89              :  *  3. Sleep until ckpt_started changes.  Now you know a checkpoint has
      90              :  *     begun since you started this algorithm (although *not* that it was
      91              :  *     specifically initiated by your signal), and that it is using your flags.
      92              :  *  4. Record new value of ckpt_started.
      93              :  *  5. Sleep until ckpt_done >= saved value of ckpt_started.  (Use modulo
      94              :  *     arithmetic here in case counters wrap around.)  Now you know a
      95              :  *     checkpoint has started and completed, but not whether it was
      96              :  *     successful.
      97              :  *  6. If ckpt_failed is different from the originally saved value,
      98              :  *     assume request failed; otherwise it was definitely successful.
      99              :  *
     100              :  * ckpt_flags holds the OR of the checkpoint request flags sent by all
     101              :  * requesting backends since the last checkpoint start.  The flags are
     102              :  * chosen so that OR'ing is the correct way to combine multiple requests.
     103              :  *
     104              :  * The requests array holds fsync requests sent by backends and not yet
     105              :  * absorbed by the checkpointer.
     106              :  *
     107              :  * Unlike the checkpoint fields, requests related fields are protected by
     108              :  * CheckpointerCommLock.
     109              :  *----------
     110              :  */
     111              : typedef struct
     112              : {
     113              :     SyncRequestType type;       /* request type */
     114              :     FileTag     ftag;           /* file identifier */
     115              : } CheckpointerRequest;
     116              : 
     117              : typedef struct
     118              : {
     119              :     pid_t       checkpointer_pid;   /* PID (0 if not started) */
     120              : 
     121              :     slock_t     ckpt_lck;       /* protects all the ckpt_* fields */
     122              : 
     123              :     int         ckpt_started;   /* advances when checkpoint starts */
     124              :     int         ckpt_done;      /* advances when checkpoint done */
     125              :     int         ckpt_failed;    /* advances when checkpoint fails */
     126              : 
     127              :     int         ckpt_flags;     /* checkpoint flags, as defined in xlog.h */
     128              : 
     129              :     ConditionVariable start_cv; /* signaled when ckpt_started advances */
     130              :     ConditionVariable done_cv;  /* signaled when ckpt_done advances */
     131              : 
     132              :     int         num_requests;   /* current # of requests */
     133              :     int         max_requests;   /* allocated array size */
     134              : 
     135              :     int         head;           /* Index of the first request in the ring
     136              :                                  * buffer */
     137              :     int         tail;           /* Index of the last request in the ring
     138              :                                  * buffer */
     139              : 
     140              :     /* The ring buffer of pending checkpointer requests */
     141              :     CheckpointerRequest requests[FLEXIBLE_ARRAY_MEMBER];
     142              : } CheckpointerShmemStruct;
     143              : 
     144              : static CheckpointerShmemStruct *CheckpointerShmem;
     145              : 
     146              : /* interval for calling AbsorbSyncRequests in CheckpointWriteDelay */
     147              : #define WRITES_PER_ABSORB       1000
     148              : 
     149              : /* Maximum number of checkpointer requests to process in one batch */
     150              : #define CKPT_REQ_BATCH_SIZE 10000
     151              : 
     152              : /* Max number of requests the checkpointer request queue can hold */
     153              : #define MAX_CHECKPOINT_REQUESTS 10000000
     154              : 
     155              : /*
     156              :  * GUC parameters
     157              :  */
     158              : int         CheckPointTimeout = 300;
     159              : int         CheckPointWarning = 30;
     160              : double      CheckPointCompletionTarget = 0.9;
     161              : 
     162              : /*
     163              :  * Private state
     164              :  */
     165              : static bool ckpt_active = false;
     166              : static volatile sig_atomic_t ShutdownXLOGPending = false;
     167              : 
     168              : /* these values are valid when ckpt_active is true: */
     169              : static pg_time_t ckpt_start_time;
     170              : static XLogRecPtr ckpt_start_recptr;
     171              : static double ckpt_cached_elapsed;
     172              : 
     173              : static pg_time_t last_checkpoint_time;
     174              : static pg_time_t last_xlog_switch_time;
     175              : 
     176              : /* Prototypes for private functions */
     177              : 
     178              : static void ProcessCheckpointerInterrupts(void);
     179              : static void CheckArchiveTimeout(void);
     180              : static bool IsCheckpointOnSchedule(double progress);
     181              : static bool FastCheckpointRequested(void);
     182              : static bool CompactCheckpointerRequestQueue(void);
     183              : static void UpdateSharedMemoryConfig(void);
     184              : 
     185              : /* Signal handlers */
     186              : static void ReqShutdownXLOG(SIGNAL_ARGS);
     187              : 
     188              : 
     189              : /*
     190              :  * Main entry point for checkpointer process
     191              :  *
     192              :  * This is invoked from AuxiliaryProcessMain, which has already created the
     193              :  * basic execution environment, but not enabled signals yet.
     194              :  */
     195              : void
     196          607 : CheckpointerMain(const void *startup_data, size_t startup_data_len)
     197              : {
     198              :     sigjmp_buf  local_sigjmp_buf;
     199              :     MemoryContext checkpointer_context;
     200              : 
     201              :     Assert(startup_data_len == 0);
     202              : 
     203          607 :     AuxiliaryProcessMainCommon();
     204              : 
     205          607 :     CheckpointerShmem->checkpointer_pid = MyProcPid;
     206              : 
     207              :     /*
     208              :      * Properly accept or ignore signals the postmaster might send us
     209              :      *
     210              :      * Note: we deliberately ignore SIGTERM, because during a standard Unix
     211              :      * system shutdown cycle, init will SIGTERM all processes at once.  We
     212              :      * want to wait for the backends to exit, whereupon the postmaster will
     213              :      * tell us it's okay to shut down (via SIGUSR2).
     214              :      */
     215          607 :     pqsignal(SIGHUP, SignalHandlerForConfigReload);
     216          607 :     pqsignal(SIGINT, ReqShutdownXLOG);
     217          607 :     pqsignal(SIGTERM, SIG_IGN); /* ignore SIGTERM */
     218              :     /* SIGQUIT handler was already set up by InitPostmasterChild */
     219          607 :     pqsignal(SIGALRM, SIG_IGN);
     220          607 :     pqsignal(SIGPIPE, SIG_IGN);
     221          607 :     pqsignal(SIGUSR1, procsignal_sigusr1_handler);
     222          607 :     pqsignal(SIGUSR2, SignalHandlerForShutdownRequest);
     223              : 
     224              :     /*
     225              :      * Reset some signals that are accepted by postmaster but not here
     226              :      */
     227          607 :     pqsignal(SIGCHLD, SIG_DFL);
     228              : 
     229              :     /*
     230              :      * Initialize so that first time-driven event happens at the correct time.
     231              :      */
     232          607 :     last_checkpoint_time = last_xlog_switch_time = (pg_time_t) time(NULL);
     233              : 
     234              :     /*
     235              :      * Write out stats after shutdown. This needs to be called by exactly one
     236              :      * process during a normal shutdown, and since checkpointer is shut down
     237              :      * very late...
     238              :      *
     239              :      * While e.g. walsenders are active after the shutdown checkpoint has been
     240              :      * written (and thus could produce more stats), checkpointer stays around
     241              :      * after the shutdown checkpoint has been written. postmaster will only
     242              :      * signal checkpointer to exit after all processes that could emit stats
     243              :      * have been shut down.
     244              :      */
     245          607 :     before_shmem_exit(pgstat_before_server_shutdown, 0);
     246              : 
     247              :     /*
     248              :      * Create a memory context that we will do all our work in.  We do this so
     249              :      * that we can reset the context during error recovery and thereby avoid
     250              :      * possible memory leaks.  Formerly this code just ran in
     251              :      * TopMemoryContext, but resetting that would be a really bad idea.
     252              :      */
     253          607 :     checkpointer_context = AllocSetContextCreate(TopMemoryContext,
     254              :                                                  "Checkpointer",
     255              :                                                  ALLOCSET_DEFAULT_SIZES);
     256          607 :     MemoryContextSwitchTo(checkpointer_context);
     257              : 
     258              :     /*
     259              :      * If an exception is encountered, processing resumes here.
     260              :      *
     261              :      * You might wonder why this isn't coded as an infinite loop around a
     262              :      * PG_TRY construct.  The reason is that this is the bottom of the
     263              :      * exception stack, and so with PG_TRY there would be no exception handler
     264              :      * in force at all during the CATCH part.  By leaving the outermost setjmp
     265              :      * always active, we have at least some chance of recovering from an error
     266              :      * during error recovery.  (If we get into an infinite loop thereby, it
     267              :      * will soon be stopped by overflow of elog.c's internal state stack.)
     268              :      *
     269              :      * Note that we use sigsetjmp(..., 1), so that the prevailing signal mask
     270              :      * (to wit, BlockSig) will be restored when longjmp'ing to here.  Thus,
     271              :      * signals other than SIGQUIT will be blocked until we complete error
     272              :      * recovery.  It might seem that this policy makes the HOLD_INTERRUPTS()
     273              :      * call redundant, but it is not since InterruptPending might be set
     274              :      * already.
     275              :      */
     276          607 :     if (sigsetjmp(local_sigjmp_buf, 1) != 0)
     277              :     {
     278              :         /* Since not using PG_TRY, must reset error stack by hand */
     279            0 :         error_context_stack = NULL;
     280              : 
     281              :         /* Prevent interrupts while cleaning up */
     282            0 :         HOLD_INTERRUPTS();
     283              : 
     284              :         /* Report the error to the server log */
     285            0 :         EmitErrorReport();
     286              : 
     287              :         /*
     288              :          * These operations are really just a minimal subset of
     289              :          * AbortTransaction().  We don't have very many resources to worry
     290              :          * about in checkpointer, but we do have LWLocks, buffers, and temp
     291              :          * files.
     292              :          */
     293            0 :         LWLockReleaseAll();
     294            0 :         ConditionVariableCancelSleep();
     295            0 :         pgstat_report_wait_end();
     296            0 :         pgaio_error_cleanup();
     297            0 :         UnlockBuffers();
     298            0 :         ReleaseAuxProcessResources(false);
     299            0 :         AtEOXact_Buffers(false);
     300            0 :         AtEOXact_SMgr();
     301            0 :         AtEOXact_Files(false);
     302            0 :         AtEOXact_HashTables(false);
     303              : 
     304              :         /* Warn any waiting backends that the checkpoint failed. */
     305            0 :         if (ckpt_active)
     306              :         {
     307            0 :             SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
     308            0 :             CheckpointerShmem->ckpt_failed++;
     309            0 :             CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
     310            0 :             SpinLockRelease(&CheckpointerShmem->ckpt_lck);
     311              : 
     312            0 :             ConditionVariableBroadcast(&CheckpointerShmem->done_cv);
     313              : 
     314            0 :             ckpt_active = false;
     315              :         }
     316              : 
     317              :         /*
     318              :          * Now return to normal top-level context and clear ErrorContext for
     319              :          * next time.
     320              :          */
     321            0 :         MemoryContextSwitchTo(checkpointer_context);
     322            0 :         FlushErrorState();
     323              : 
     324              :         /* Flush any leaked data in the top-level context */
     325            0 :         MemoryContextReset(checkpointer_context);
     326              : 
     327              :         /* Now we can allow interrupts again */
     328            0 :         RESUME_INTERRUPTS();
     329              : 
     330              :         /*
     331              :          * Sleep at least 1 second after any error.  A write error is likely
     332              :          * to be repeated, and we don't want to be filling the error logs as
     333              :          * fast as we can.
     334              :          */
     335            0 :         pg_usleep(1000000L);
     336              :     }
     337              : 
     338              :     /* We can now handle ereport(ERROR) */
     339          607 :     PG_exception_stack = &local_sigjmp_buf;
     340              : 
     341              :     /*
     342              :      * Unblock signals (they were blocked when the postmaster forked us)
     343              :      */
     344          607 :     sigprocmask(SIG_SETMASK, &UnBlockSig, NULL);
     345              : 
     346              :     /*
     347              :      * Ensure all shared memory values are set correctly for the config. Doing
     348              :      * this here ensures no race conditions from other concurrent updaters.
     349              :      */
     350          607 :     UpdateSharedMemoryConfig();
     351              : 
     352              :     /*
     353              :      * Advertise our proc number that backends can use to wake us up while
     354              :      * we're sleeping.
     355              :      */
     356          607 :     ProcGlobal->checkpointerProc = MyProcNumber;
     357              : 
     358              :     /*
     359              :      * Loop until we've been asked to write the shutdown checkpoint or
     360              :      * terminate.
     361              :      */
     362              :     for (;;)
     363         4493 :     {
     364         5100 :         bool        do_checkpoint = false;
     365         5100 :         int         flags = 0;
     366              :         pg_time_t   now;
     367              :         int         elapsed_secs;
     368              :         int         cur_timeout;
     369         5100 :         bool        chkpt_or_rstpt_requested = false;
     370         5100 :         bool        chkpt_or_rstpt_timed = false;
     371              : 
     372              :         /* Clear any already-pending wakeups */
     373         5100 :         ResetLatch(MyLatch);
     374              : 
     375              :         /*
     376              :          * Process any requests or signals received recently.
     377              :          */
     378         5100 :         AbsorbSyncRequests();
     379              : 
     380         5100 :         ProcessCheckpointerInterrupts();
     381         5100 :         if (ShutdownXLOGPending || ShutdownRequestPending)
     382              :             break;
     383              : 
     384              :         /*
     385              :          * Detect a pending checkpoint request by checking whether the flags
     386              :          * word in shared memory is nonzero.  We shouldn't need to acquire the
     387              :          * ckpt_lck for this.
     388              :          */
     389         4507 :         if (((volatile CheckpointerShmemStruct *) CheckpointerShmem)->ckpt_flags)
     390              :         {
     391         1316 :             do_checkpoint = true;
     392         1316 :             chkpt_or_rstpt_requested = true;
     393              :         }
     394              : 
     395              :         /*
     396              :          * Force a checkpoint if too much time has elapsed since the last one.
     397              :          * Note that we count a timed checkpoint in stats only when this
     398              :          * occurs without an external request, but we set the CAUSE_TIME flag
     399              :          * bit even if there is also an external request.
     400              :          */
     401         4507 :         now = (pg_time_t) time(NULL);
     402         4507 :         elapsed_secs = now - last_checkpoint_time;
     403         4507 :         if (elapsed_secs >= CheckPointTimeout)
     404              :         {
     405            1 :             if (!do_checkpoint)
     406            1 :                 chkpt_or_rstpt_timed = true;
     407            1 :             do_checkpoint = true;
     408            1 :             flags |= CHECKPOINT_CAUSE_TIME;
     409              :         }
     410              : 
     411              :         /*
     412              :          * Do a checkpoint if requested.
     413              :          */
     414         4507 :         if (do_checkpoint)
     415              :         {
     416         1317 :             bool        ckpt_performed = false;
     417              :             bool        do_restartpoint;
     418              : 
     419              :             /* Check if we should perform a checkpoint or a restartpoint. */
     420         1317 :             do_restartpoint = RecoveryInProgress();
     421              : 
     422              :             /*
     423              :              * Atomically fetch the request flags to figure out what kind of a
     424              :              * checkpoint we should perform, and increase the started-counter
     425              :              * to acknowledge that we've started a new checkpoint.
     426              :              */
     427         1317 :             SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
     428         1317 :             flags |= CheckpointerShmem->ckpt_flags;
     429         1317 :             CheckpointerShmem->ckpt_flags = 0;
     430         1317 :             CheckpointerShmem->ckpt_started++;
     431         1317 :             SpinLockRelease(&CheckpointerShmem->ckpt_lck);
     432              : 
     433         1317 :             ConditionVariableBroadcast(&CheckpointerShmem->start_cv);
     434              : 
     435              :             /*
     436              :              * The end-of-recovery checkpoint is a real checkpoint that's
     437              :              * performed while we're still in recovery.
     438              :              */
     439         1317 :             if (flags & CHECKPOINT_END_OF_RECOVERY)
     440           21 :                 do_restartpoint = false;
     441              : 
     442         1317 :             if (chkpt_or_rstpt_timed)
     443              :             {
     444            1 :                 chkpt_or_rstpt_timed = false;
     445            1 :                 if (do_restartpoint)
     446            0 :                     PendingCheckpointerStats.restartpoints_timed++;
     447              :                 else
     448            1 :                     PendingCheckpointerStats.num_timed++;
     449              :             }
     450              : 
     451         1317 :             if (chkpt_or_rstpt_requested)
     452              :             {
     453         1316 :                 chkpt_or_rstpt_requested = false;
     454         1316 :                 if (do_restartpoint)
     455          552 :                     PendingCheckpointerStats.restartpoints_requested++;
     456              :                 else
     457          764 :                     PendingCheckpointerStats.num_requested++;
     458              :             }
     459              : 
     460              :             /*
     461              :              * We will warn if (a) too soon since last checkpoint (whatever
     462              :              * caused it) and (b) somebody set the CHECKPOINT_CAUSE_XLOG flag
     463              :              * since the last checkpoint start.  Note in particular that this
     464              :              * implementation will not generate warnings caused by
     465              :              * CheckPointTimeout < CheckPointWarning.
     466              :              */
     467         1317 :             if (!do_restartpoint &&
     468          765 :                 (flags & CHECKPOINT_CAUSE_XLOG) &&
     469          200 :                 elapsed_secs < CheckPointWarning)
     470          200 :                 ereport(LOG,
     471              :                         (errmsg_plural("checkpoints are occurring too frequently (%d second apart)",
     472              :                                        "checkpoints are occurring too frequently (%d seconds apart)",
     473              :                                        elapsed_secs,
     474              :                                        elapsed_secs),
     475              :                          errhint("Consider increasing the configuration parameter \"%s\".", "max_wal_size")));
     476              : 
     477              :             /*
     478              :              * Initialize checkpointer-private variables used during
     479              :              * checkpoint.
     480              :              */
     481         1317 :             ckpt_active = true;
     482         1317 :             if (do_restartpoint)
     483          552 :                 ckpt_start_recptr = GetXLogReplayRecPtr(NULL);
     484              :             else
     485          765 :                 ckpt_start_recptr = GetInsertRecPtr();
     486         1317 :             ckpt_start_time = now;
     487         1317 :             ckpt_cached_elapsed = 0;
     488              : 
     489              :             /*
     490              :              * Do the checkpoint.
     491              :              */
     492         1317 :             if (!do_restartpoint)
     493          765 :                 ckpt_performed = CreateCheckPoint(flags);
     494              :             else
     495          552 :                 ckpt_performed = CreateRestartPoint(flags);
     496              : 
     497              :             /*
     498              :              * After any checkpoint, free all smgr objects.  Otherwise we
     499              :              * would never do so for dropped relations, as the checkpointer
     500              :              * does not process shared invalidation messages or call
     501              :              * AtEOXact_SMgr().
     502              :              */
     503         1317 :             smgrdestroyall();
     504              : 
     505              :             /*
     506              :              * Indicate checkpoint completion to any waiting backends.
     507              :              */
     508         1317 :             SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
     509         1317 :             CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
     510         1317 :             SpinLockRelease(&CheckpointerShmem->ckpt_lck);
     511              : 
     512         1317 :             ConditionVariableBroadcast(&CheckpointerShmem->done_cv);
     513              : 
     514         1317 :             if (!do_restartpoint)
     515              :             {
     516              :                 /*
     517              :                  * Note we record the checkpoint start time not end time as
     518              :                  * last_checkpoint_time.  This is so that time-driven
     519              :                  * checkpoints happen at a predictable spacing.
     520              :                  */
     521          765 :                 last_checkpoint_time = now;
     522              : 
     523          765 :                 if (ckpt_performed)
     524          762 :                     PendingCheckpointerStats.num_performed++;
     525              :             }
     526              :             else
     527              :             {
     528          552 :                 if (ckpt_performed)
     529              :                 {
     530              :                     /*
     531              :                      * The same as for checkpoint. Please see the
     532              :                      * corresponding comment.
     533              :                      */
     534          180 :                     last_checkpoint_time = now;
     535              : 
     536          180 :                     PendingCheckpointerStats.restartpoints_performed++;
     537              :                 }
     538              :                 else
     539              :                 {
     540              :                     /*
     541              :                      * We were not able to perform the restartpoint
     542              :                      * (checkpoints throw an ERROR in case of error).  Most
     543              :                      * likely because we have not received any new checkpoint
     544              :                      * WAL records since the last restartpoint. Try again in
     545              :                      * 15 s.
     546              :                      */
     547          372 :                     last_checkpoint_time = now - CheckPointTimeout + 15;
     548              :                 }
     549              :             }
     550              : 
     551         1317 :             ckpt_active = false;
     552              : 
     553              :             /*
     554              :              * We may have received an interrupt during the checkpoint and the
     555              :              * latch might have been reset (e.g. in CheckpointWriteDelay).
     556              :              */
     557         1317 :             ProcessCheckpointerInterrupts();
     558         1317 :             if (ShutdownXLOGPending || ShutdownRequestPending)
     559              :                 break;
     560              :         }
     561              : 
     562              :         /*
     563              :          * Disable logical decoding if someone requested it. See comments atop
     564              :          * logicalctl.c.
     565              :          */
     566         4500 :         DisableLogicalDecodingIfNecessary();
     567              : 
     568              :         /* Check for archive_timeout and switch xlog files if necessary. */
     569         4500 :         CheckArchiveTimeout();
     570              : 
     571              :         /* Report pending statistics to the cumulative stats system */
     572         4500 :         pgstat_report_checkpointer();
     573         4500 :         pgstat_report_wal(true);
     574              : 
     575              :         /*
     576              :          * If any checkpoint flags have been set, redo the loop to handle the
     577              :          * checkpoint without sleeping.
     578              :          */
     579         4500 :         if (((volatile CheckpointerShmemStruct *) CheckpointerShmem)->ckpt_flags)
     580          351 :             continue;
     581              : 
     582              :         /*
     583              :          * Sleep until we are signaled or it's time for another checkpoint or
     584              :          * xlog file switch.
     585              :          */
     586         4149 :         now = (pg_time_t) time(NULL);
     587         4149 :         elapsed_secs = now - last_checkpoint_time;
     588         4149 :         if (elapsed_secs >= CheckPointTimeout)
     589            0 :             continue;           /* no sleep for us ... */
     590         4149 :         cur_timeout = CheckPointTimeout - elapsed_secs;
     591         4149 :         if (XLogArchiveTimeout > 0 && !RecoveryInProgress())
     592              :         {
     593            0 :             elapsed_secs = now - last_xlog_switch_time;
     594            0 :             if (elapsed_secs >= XLogArchiveTimeout)
     595            0 :                 continue;       /* no sleep for us ... */
     596            0 :             cur_timeout = Min(cur_timeout, XLogArchiveTimeout - elapsed_secs);
     597              :         }
     598              : 
     599         4149 :         (void) WaitLatch(MyLatch,
     600              :                          WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
     601              :                          cur_timeout * 1000L /* convert to ms */ ,
     602              :                          WAIT_EVENT_CHECKPOINTER_MAIN);
     603              :     }
     604              : 
     605              :     /*
     606              :      * From here on, elog(ERROR) should end with exit(1), not send control
     607              :      * back to the sigsetjmp block above.
     608              :      */
     609          600 :     ExitOnAnyError = true;
     610              : 
     611          600 :     if (ShutdownXLOGPending)
     612              :     {
     613              :         /*
     614              :          * Close down the database.
     615              :          *
     616              :          * Since ShutdownXLOG() creates restartpoint or checkpoint, and
     617              :          * updates the statistics, increment the checkpoint request and flush
     618              :          * out pending statistic.
     619              :          */
     620          600 :         PendingCheckpointerStats.num_requested++;
     621          600 :         ShutdownXLOG(0, 0);
     622          600 :         pgstat_report_checkpointer();
     623          600 :         pgstat_report_wal(true);
     624              : 
     625              :         /*
     626              :          * Tell postmaster that we're done.
     627              :          */
     628          600 :         SendPostmasterSignal(PMSIGNAL_XLOG_IS_SHUTDOWN);
     629          600 :         ShutdownXLOGPending = false;
     630              :     }
     631              : 
     632              :     /*
     633              :      * Wait until we're asked to shut down. By separating the writing of the
     634              :      * shutdown checkpoint from checkpointer exiting, checkpointer can perform
     635              :      * some should-be-as-late-as-possible work like writing out stats.
     636              :      */
     637              :     for (;;)
     638              :     {
     639              :         /* Clear any already-pending wakeups */
     640         1225 :         ResetLatch(MyLatch);
     641              : 
     642         1225 :         ProcessCheckpointerInterrupts();
     643              : 
     644         1225 :         if (ShutdownRequestPending)
     645          600 :             break;
     646              : 
     647          625 :         (void) WaitLatch(MyLatch,
     648              :                          WL_LATCH_SET | WL_EXIT_ON_PM_DEATH,
     649              :                          0,
     650              :                          WAIT_EVENT_CHECKPOINTER_SHUTDOWN);
     651              :     }
     652              : 
     653              :     /* Normal exit from the checkpointer is here */
     654          600 :     proc_exit(0);               /* done */
     655              : }
     656              : 
     657              : /*
     658              :  * Process any new interrupts.
     659              :  */
     660              : static void
     661         7642 : ProcessCheckpointerInterrupts(void)
     662              : {
     663         7642 :     if (ProcSignalBarrierPending)
     664          307 :         ProcessProcSignalBarrier();
     665              : 
     666         7642 :     if (ConfigReloadPending)
     667              :     {
     668           78 :         ConfigReloadPending = false;
     669           78 :         ProcessConfigFile(PGC_SIGHUP);
     670              : 
     671              :         /*
     672              :          * Checkpointer is the last process to shut down, so we ask it to hold
     673              :          * the keys for a range of other tasks required most of which have
     674              :          * nothing to do with checkpointing at all.
     675              :          *
     676              :          * For various reasons, some config values can change dynamically so
     677              :          * the primary copy of them is held in shared memory to make sure all
     678              :          * backends see the same value.  We make Checkpointer responsible for
     679              :          * updating the shared memory copy if the parameter setting changes
     680              :          * because of SIGHUP.
     681              :          */
     682           78 :         UpdateSharedMemoryConfig();
     683              :     }
     684              : 
     685              :     /* Perform logging of memory contexts of this process */
     686         7642 :     if (LogMemoryContextPending)
     687            1 :         ProcessLogMemoryContextInterrupt();
     688         7642 : }
     689              : 
     690              : /*
     691              :  * CheckArchiveTimeout -- check for archive_timeout and switch xlog files
     692              :  *
     693              :  * This will switch to a new WAL file and force an archive file write if
     694              :  * meaningful activity is recorded in the current WAL file. This includes most
     695              :  * writes, including just a single checkpoint record, but excludes WAL records
     696              :  * that were inserted with the XLOG_MARK_UNIMPORTANT flag being set (like
     697              :  * snapshots of running transactions).  Such records, depending on
     698              :  * configuration, occur on regular intervals and don't contain important
     699              :  * information.  This avoids generating archives with a few unimportant
     700              :  * records.
     701              :  */
     702              : static void
     703        11998 : CheckArchiveTimeout(void)
     704              : {
     705              :     pg_time_t   now;
     706              :     pg_time_t   last_time;
     707              :     XLogRecPtr  last_switch_lsn;
     708              : 
     709        11998 :     if (XLogArchiveTimeout <= 0 || RecoveryInProgress())
     710        11998 :         return;
     711              : 
     712            0 :     now = (pg_time_t) time(NULL);
     713              : 
     714              :     /* First we do a quick check using possibly-stale local state. */
     715            0 :     if ((int) (now - last_xlog_switch_time) < XLogArchiveTimeout)
     716            0 :         return;
     717              : 
     718              :     /*
     719              :      * Update local state ... note that last_xlog_switch_time is the last time
     720              :      * a switch was performed *or requested*.
     721              :      */
     722            0 :     last_time = GetLastSegSwitchData(&last_switch_lsn);
     723              : 
     724            0 :     last_xlog_switch_time = Max(last_xlog_switch_time, last_time);
     725              : 
     726              :     /* Now we can do the real checks */
     727            0 :     if ((int) (now - last_xlog_switch_time) >= XLogArchiveTimeout)
     728              :     {
     729              :         /*
     730              :          * Switch segment only when "important" WAL has been logged since the
     731              :          * last segment switch (last_switch_lsn points to end of segment
     732              :          * switch occurred in).
     733              :          */
     734            0 :         if (GetLastImportantRecPtr() > last_switch_lsn)
     735              :         {
     736              :             XLogRecPtr  switchpoint;
     737              : 
     738              :             /* mark switch as unimportant, avoids triggering checkpoints */
     739            0 :             switchpoint = RequestXLogSwitch(true);
     740              : 
     741              :             /*
     742              :              * If the returned pointer points exactly to a segment boundary,
     743              :              * assume nothing happened.
     744              :              */
     745            0 :             if (XLogSegmentOffset(switchpoint, wal_segment_size) != 0)
     746            0 :                 elog(DEBUG1, "write-ahead log switch forced (\"archive_timeout\"=%d)",
     747              :                      XLogArchiveTimeout);
     748              :         }
     749              : 
     750              :         /*
     751              :          * Update state in any case, so we don't retry constantly when the
     752              :          * system is idle.
     753              :          */
     754            0 :         last_xlog_switch_time = now;
     755              :     }
     756              : }
     757              : 
     758              : /*
     759              :  * Returns true if a fast checkpoint request is pending.  (Note that this does
     760              :  * not check the *current* checkpoint's FAST flag, but whether there is one
     761              :  * pending behind it.)
     762              :  */
     763              : static bool
     764        48974 : FastCheckpointRequested(void)
     765              : {
     766        48974 :     volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
     767              : 
     768              :     /*
     769              :      * We don't need to acquire the ckpt_lck in this case because we're only
     770              :      * looking at a single flag bit.
     771              :      */
     772        48974 :     if (cps->ckpt_flags & CHECKPOINT_FAST)
     773         6700 :         return true;
     774        42274 :     return false;
     775              : }
     776              : 
     777              : /*
     778              :  * CheckpointWriteDelay -- control rate of checkpoint
     779              :  *
     780              :  * This function is called after each page write performed by BufferSync().
     781              :  * It is responsible for throttling BufferSync()'s write rate to hit
     782              :  * checkpoint_completion_target.
     783              :  *
     784              :  * The checkpoint request flags should be passed in; currently the only one
     785              :  * examined is CHECKPOINT_FAST, which disables delays between writes.
     786              :  *
     787              :  * 'progress' is an estimate of how much of the work has been done, as a
     788              :  * fraction between 0.0 meaning none, and 1.0 meaning all done.
     789              :  */
     790              : void
     791       322542 : CheckpointWriteDelay(int flags, double progress)
     792              : {
     793              :     static int  absorb_counter = WRITES_PER_ABSORB;
     794              : 
     795              :     /* Do nothing if checkpoint is being executed by non-checkpointer process */
     796       322542 :     if (!AmCheckpointerProcess())
     797        51929 :         return;
     798              : 
     799              :     /*
     800              :      * Perform the usual duties and take a nap, unless we're behind schedule,
     801              :      * in which case we just try to catch up as quickly as possible.
     802              :      */
     803       270613 :     if (!(flags & CHECKPOINT_FAST) &&
     804        49240 :         !ShutdownXLOGPending &&
     805        48974 :         !ShutdownRequestPending &&
     806        91248 :         !FastCheckpointRequested() &&
     807        42274 :         IsCheckpointOnSchedule(progress))
     808              :     {
     809         7498 :         if (ConfigReloadPending)
     810              :         {
     811            0 :             ConfigReloadPending = false;
     812            0 :             ProcessConfigFile(PGC_SIGHUP);
     813              :             /* update shmem copies of config variables */
     814            0 :             UpdateSharedMemoryConfig();
     815              :         }
     816              : 
     817         7498 :         AbsorbSyncRequests();
     818         7498 :         absorb_counter = WRITES_PER_ABSORB;
     819              : 
     820         7498 :         CheckArchiveTimeout();
     821              : 
     822              :         /* Report interim statistics to the cumulative stats system */
     823         7498 :         pgstat_report_checkpointer();
     824              : 
     825              :         /*
     826              :          * This sleep used to be connected to bgwriter_delay, typically 200ms.
     827              :          * That resulted in more frequent wakeups if not much work to do.
     828              :          * Checkpointer and bgwriter are no longer related so take the Big
     829              :          * Sleep.
     830              :          */
     831         7498 :         WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH | WL_TIMEOUT,
     832              :                   100,
     833              :                   WAIT_EVENT_CHECKPOINT_WRITE_DELAY);
     834         7498 :         ResetLatch(MyLatch);
     835              :     }
     836       263115 :     else if (--absorb_counter <= 0)
     837              :     {
     838              :         /*
     839              :          * Absorb pending fsync requests after each WRITES_PER_ABSORB write
     840              :          * operations even when we don't sleep, to prevent overflow of the
     841              :          * fsync request queue.
     842              :          */
     843          134 :         AbsorbSyncRequests();
     844          134 :         absorb_counter = WRITES_PER_ABSORB;
     845              :     }
     846              : 
     847              :     /* Check for barrier events. */
     848       270613 :     if (ProcSignalBarrierPending)
     849            7 :         ProcessProcSignalBarrier();
     850              : }
     851              : 
     852              : /*
     853              :  * IsCheckpointOnSchedule -- are we on schedule to finish this checkpoint
     854              :  *       (or restartpoint) in time?
     855              :  *
     856              :  * Compares the current progress against the time/segments elapsed since last
     857              :  * checkpoint, and returns true if the progress we've made this far is greater
     858              :  * than the elapsed time/segments.
     859              :  */
     860              : static bool
     861        42274 : IsCheckpointOnSchedule(double progress)
     862              : {
     863              :     XLogRecPtr  recptr;
     864              :     struct timeval now;
     865              :     double      elapsed_xlogs,
     866              :                 elapsed_time;
     867              : 
     868              :     Assert(ckpt_active);
     869              : 
     870              :     /* Scale progress according to checkpoint_completion_target. */
     871        42274 :     progress *= CheckPointCompletionTarget;
     872              : 
     873              :     /*
     874              :      * Check against the cached value first. Only do the more expensive
     875              :      * calculations once we reach the target previously calculated. Since
     876              :      * neither time or WAL insert pointer moves backwards, a freshly
     877              :      * calculated value can only be greater than or equal to the cached value.
     878              :      */
     879        42274 :     if (progress < ckpt_cached_elapsed)
     880        31107 :         return false;
     881              : 
     882              :     /*
     883              :      * Check progress against WAL segments written and CheckPointSegments.
     884              :      *
     885              :      * We compare the current WAL insert location against the location
     886              :      * computed before calling CreateCheckPoint. The code in XLogInsert that
     887              :      * actually triggers a checkpoint when CheckPointSegments is exceeded
     888              :      * compares against RedoRecPtr, so this is not completely accurate.
     889              :      * However, it's good enough for our purposes, we're only calculating an
     890              :      * estimate anyway.
     891              :      *
     892              :      * During recovery, we compare last replayed WAL record's location with
     893              :      * the location computed before calling CreateRestartPoint. That maintains
     894              :      * the same pacing as we have during checkpoints in normal operation, but
     895              :      * we might exceed max_wal_size by a fair amount. That's because there can
     896              :      * be a large gap between a checkpoint's redo-pointer and the checkpoint
     897              :      * record itself, and we only start the restartpoint after we've seen the
     898              :      * checkpoint record. (The gap is typically up to CheckPointSegments *
     899              :      * checkpoint_completion_target where checkpoint_completion_target is the
     900              :      * value that was in effect when the WAL was generated).
     901              :      */
     902        11167 :     if (RecoveryInProgress())
     903         5229 :         recptr = GetXLogReplayRecPtr(NULL);
     904              :     else
     905         5938 :         recptr = GetInsertRecPtr();
     906        11167 :     elapsed_xlogs = (((double) (recptr - ckpt_start_recptr)) /
     907        11167 :                      wal_segment_size) / CheckPointSegments;
     908              : 
     909        11167 :     if (progress < elapsed_xlogs)
     910              :     {
     911         3666 :         ckpt_cached_elapsed = elapsed_xlogs;
     912         3666 :         return false;
     913              :     }
     914              : 
     915              :     /*
     916              :      * Check progress against time elapsed and checkpoint_timeout.
     917              :      */
     918         7501 :     gettimeofday(&now, NULL);
     919         7501 :     elapsed_time = ((double) ((pg_time_t) now.tv_sec - ckpt_start_time) +
     920         7501 :                     now.tv_usec / 1000000.0) / CheckPointTimeout;
     921              : 
     922         7501 :     if (progress < elapsed_time)
     923              :     {
     924            3 :         ckpt_cached_elapsed = elapsed_time;
     925            3 :         return false;
     926              :     }
     927              : 
     928              :     /* It looks like we're on schedule. */
     929         7498 :     return true;
     930              : }
     931              : 
     932              : 
     933              : /* --------------------------------
     934              :  *      signal handler routines
     935              :  * --------------------------------
     936              :  */
     937              : 
     938              : /* SIGINT: set flag to trigger writing of shutdown checkpoint */
     939              : static void
     940          661 : ReqShutdownXLOG(SIGNAL_ARGS)
     941              : {
     942          661 :     ShutdownXLOGPending = true;
     943          661 :     SetLatch(MyLatch);
     944          661 : }
     945              : 
     946              : 
     947              : /* --------------------------------
     948              :  *      communication with backends
     949              :  * --------------------------------
     950              :  */
     951              : 
     952              : /*
     953              :  * CheckpointerShmemSize
     954              :  *      Compute space needed for checkpointer-related shared memory
     955              :  */
     956              : Size
     957         3387 : CheckpointerShmemSize(void)
     958              : {
     959              :     Size        size;
     960              : 
     961              :     /*
     962              :      * The size of the requests[] array is arbitrarily set equal to NBuffers.
     963              :      * But there is a cap of MAX_CHECKPOINT_REQUESTS to prevent accumulating
     964              :      * too many checkpoint requests in the ring buffer.
     965              :      */
     966         3387 :     size = offsetof(CheckpointerShmemStruct, requests);
     967         3387 :     size = add_size(size, mul_size(Min(NBuffers,
     968              :                                        MAX_CHECKPOINT_REQUESTS),
     969              :                                    sizeof(CheckpointerRequest)));
     970              : 
     971         3387 :     return size;
     972              : }
     973              : 
     974              : /*
     975              :  * CheckpointerShmemInit
     976              :  *      Allocate and initialize checkpointer-related shared memory
     977              :  */
     978              : void
     979         1180 : CheckpointerShmemInit(void)
     980              : {
     981         1180 :     Size        size = CheckpointerShmemSize();
     982              :     bool        found;
     983              : 
     984         1180 :     CheckpointerShmem = (CheckpointerShmemStruct *)
     985         1180 :         ShmemInitStruct("Checkpointer Data",
     986              :                         size,
     987              :                         &found);
     988              : 
     989         1180 :     if (!found)
     990              :     {
     991              :         /*
     992              :          * First time through, so initialize.  Note that we zero the whole
     993              :          * requests array; this is so that CompactCheckpointerRequestQueue can
     994              :          * assume that any pad bytes in the request structs are zeroes.
     995              :          */
     996         1324 :         MemSet(CheckpointerShmem, 0, size);
     997         1180 :         SpinLockInit(&CheckpointerShmem->ckpt_lck);
     998         1180 :         CheckpointerShmem->max_requests = Min(NBuffers, MAX_CHECKPOINT_REQUESTS);
     999         1180 :         CheckpointerShmem->head = CheckpointerShmem->tail = 0;
    1000         1180 :         ConditionVariableInit(&CheckpointerShmem->start_cv);
    1001         1180 :         ConditionVariableInit(&CheckpointerShmem->done_cv);
    1002              :     }
    1003         1180 : }
    1004              : 
    1005              : /*
    1006              :  * ExecCheckpoint
    1007              :  *      Primary entry point for manual CHECKPOINT commands
    1008              :  *
    1009              :  * This is mainly a wrapper for RequestCheckpoint().
    1010              :  */
    1011              : void
    1012          463 : ExecCheckpoint(ParseState *pstate, CheckPointStmt *stmt)
    1013              : {
    1014          463 :     bool        fast = true;
    1015          463 :     bool        unlogged = false;
    1016              : 
    1017          930 :     foreach_ptr(DefElem, opt, stmt->options)
    1018              :     {
    1019           20 :         if (strcmp(opt->defname, "mode") == 0)
    1020              :         {
    1021            8 :             char       *mode = defGetString(opt);
    1022              : 
    1023            8 :             if (strcmp(mode, "spread") == 0)
    1024            0 :                 fast = false;
    1025            8 :             else if (strcmp(mode, "fast") != 0)
    1026            4 :                 ereport(ERROR,
    1027              :                         (errcode(ERRCODE_SYNTAX_ERROR),
    1028              :                          errmsg("unrecognized value for %s option \"%s\": \"%s\"",
    1029              :                                 "CHECKPOINT", "mode", mode),
    1030              :                          parser_errposition(pstate, opt->location)));
    1031              :         }
    1032           12 :         else if (strcmp(opt->defname, "flush_unlogged") == 0)
    1033            8 :             unlogged = defGetBoolean(opt);
    1034              :         else
    1035            4 :             ereport(ERROR,
    1036              :                     (errcode(ERRCODE_SYNTAX_ERROR),
    1037              :                      errmsg("unrecognized %s option \"%s\"",
    1038              :                             "CHECKPOINT", opt->defname),
    1039              :                      parser_errposition(pstate, opt->location)));
    1040              :     }
    1041              : 
    1042          455 :     if (!has_privs_of_role(GetUserId(), ROLE_PG_CHECKPOINT))
    1043            0 :         ereport(ERROR,
    1044              :                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
    1045              :         /* translator: %s is name of an SQL command (e.g., CHECKPOINT) */
    1046              :                  errmsg("permission denied to execute %s command",
    1047              :                         "CHECKPOINT"),
    1048              :                  errdetail("Only roles with privileges of the \"%s\" role may execute this command.",
    1049              :                            "pg_checkpoint")));
    1050              : 
    1051          910 :     RequestCheckpoint(CHECKPOINT_WAIT |
    1052          455 :                       (fast ? CHECKPOINT_FAST : 0) |
    1053          455 :                       (unlogged ? CHECKPOINT_FLUSH_UNLOGGED : 0) |
    1054          455 :                       (RecoveryInProgress() ? 0 : CHECKPOINT_FORCE));
    1055          455 : }
    1056              : 
    1057              : /*
    1058              :  * RequestCheckpoint
    1059              :  *      Called in backend processes to request a checkpoint
    1060              :  *
    1061              :  * flags is a bitwise OR of the following:
    1062              :  *  CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
    1063              :  *  CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
    1064              :  *  CHECKPOINT_FAST: finish the checkpoint ASAP,
    1065              :  *      ignoring checkpoint_completion_target parameter.
    1066              :  *  CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
    1067              :  *      since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
    1068              :  *      CHECKPOINT_END_OF_RECOVERY, and the CHECKPOINT command).
    1069              :  *  CHECKPOINT_WAIT: wait for completion before returning (otherwise,
    1070              :  *      just signal checkpointer to do it, and return).
    1071              :  *  CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling.
    1072              :  *      (This affects logging, and in particular enables CheckPointWarning.)
    1073              :  */
    1074              : void
    1075         2674 : RequestCheckpoint(int flags)
    1076              : {
    1077              :     int         ntries;
    1078              :     int         old_failed,
    1079              :                 old_started;
    1080              : 
    1081              :     /*
    1082              :      * If in a standalone backend, just do it ourselves.
    1083              :      */
    1084         2674 :     if (!IsPostmasterEnvironment)
    1085              :     {
    1086              :         /*
    1087              :          * There's no point in doing slow checkpoints in a standalone backend,
    1088              :          * because there's no other backends the checkpoint could disrupt.
    1089              :          */
    1090          205 :         CreateCheckPoint(flags | CHECKPOINT_FAST);
    1091              : 
    1092              :         /* Free all smgr objects, as CheckpointerMain() normally would. */
    1093          205 :         smgrdestroyall();
    1094              : 
    1095          205 :         return;
    1096              :     }
    1097              : 
    1098              :     /*
    1099              :      * Atomically set the request flags, and take a snapshot of the counters.
    1100              :      * When we see ckpt_started > old_started, we know the flags we set here
    1101              :      * have been seen by checkpointer.
    1102              :      *
    1103              :      * Note that we OR the flags with any existing flags, to avoid overriding
    1104              :      * a "stronger" request by another backend.  The flag senses must be
    1105              :      * chosen to make this work!
    1106              :      */
    1107         2469 :     SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
    1108              : 
    1109         2469 :     old_failed = CheckpointerShmem->ckpt_failed;
    1110         2469 :     old_started = CheckpointerShmem->ckpt_started;
    1111         2469 :     CheckpointerShmem->ckpt_flags |= (flags | CHECKPOINT_REQUESTED);
    1112              : 
    1113         2469 :     SpinLockRelease(&CheckpointerShmem->ckpt_lck);
    1114              : 
    1115              :     /*
    1116              :      * Set checkpointer's latch to request checkpoint.  It's possible that the
    1117              :      * checkpointer hasn't started yet, so we will retry a few times if
    1118              :      * needed.  (Actually, more than a few times, since on slow or overloaded
    1119              :      * buildfarm machines, it's been observed that the checkpointer can take
    1120              :      * several seconds to start.)  However, if not told to wait for the
    1121              :      * checkpoint to occur, we consider failure to set the latch to be
    1122              :      * nonfatal and merely LOG it.  The checkpointer should see the request
    1123              :      * when it does start, with or without the SetLatch().
    1124              :      */
    1125              : #define MAX_SIGNAL_TRIES 600    /* max wait 60.0 sec */
    1126         2469 :     for (ntries = 0;; ntries++)
    1127           19 :     {
    1128         2488 :         volatile PROC_HDR *procglobal = ProcGlobal;
    1129         2488 :         ProcNumber  checkpointerProc = procglobal->checkpointerProc;
    1130              : 
    1131         2488 :         if (checkpointerProc == INVALID_PROC_NUMBER)
    1132              :         {
    1133           19 :             if (ntries >= MAX_SIGNAL_TRIES || !(flags & CHECKPOINT_WAIT))
    1134              :             {
    1135            0 :                 elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
    1136              :                      "could not notify checkpoint: checkpointer is not running");
    1137            0 :                 break;
    1138              :             }
    1139              :         }
    1140              :         else
    1141              :         {
    1142         2469 :             SetLatch(&GetPGProcByNumber(checkpointerProc)->procLatch);
    1143              :             /* notified successfully */
    1144         2469 :             break;
    1145              :         }
    1146              : 
    1147           19 :         CHECK_FOR_INTERRUPTS();
    1148           19 :         pg_usleep(100000L);     /* wait 0.1 sec, then retry */
    1149              :     }
    1150              : 
    1151              :     /*
    1152              :      * If requested, wait for completion.  We detect completion according to
    1153              :      * the algorithm given above.
    1154              :      */
    1155         2469 :     if (flags & CHECKPOINT_WAIT)
    1156              :     {
    1157              :         int         new_started,
    1158              :                     new_failed;
    1159              : 
    1160              :         /* Wait for a new checkpoint to start. */
    1161          855 :         ConditionVariablePrepareToSleep(&CheckpointerShmem->start_cv);
    1162              :         for (;;)
    1163              :         {
    1164         1579 :             SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
    1165         1579 :             new_started = CheckpointerShmem->ckpt_started;
    1166         1579 :             SpinLockRelease(&CheckpointerShmem->ckpt_lck);
    1167              : 
    1168         1579 :             if (new_started != old_started)
    1169          855 :                 break;
    1170              : 
    1171          724 :             ConditionVariableSleep(&CheckpointerShmem->start_cv,
    1172              :                                    WAIT_EVENT_CHECKPOINT_START);
    1173              :         }
    1174          855 :         ConditionVariableCancelSleep();
    1175              : 
    1176              :         /*
    1177              :          * We are waiting for ckpt_done >= new_started, in a modulo sense.
    1178              :          */
    1179          855 :         ConditionVariablePrepareToSleep(&CheckpointerShmem->done_cv);
    1180              :         for (;;)
    1181          652 :         {
    1182              :             int         new_done;
    1183              : 
    1184         1507 :             SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
    1185         1507 :             new_done = CheckpointerShmem->ckpt_done;
    1186         1507 :             new_failed = CheckpointerShmem->ckpt_failed;
    1187         1507 :             SpinLockRelease(&CheckpointerShmem->ckpt_lck);
    1188              : 
    1189         1507 :             if (new_done - new_started >= 0)
    1190          855 :                 break;
    1191              : 
    1192          652 :             ConditionVariableSleep(&CheckpointerShmem->done_cv,
    1193              :                                    WAIT_EVENT_CHECKPOINT_DONE);
    1194              :         }
    1195          855 :         ConditionVariableCancelSleep();
    1196              : 
    1197          855 :         if (new_failed != old_failed)
    1198            0 :             ereport(ERROR,
    1199              :                     (errmsg("checkpoint request failed"),
    1200              :                      errhint("Consult recent messages in the server log for details.")));
    1201              :     }
    1202              : }
    1203              : 
    1204              : /*
    1205              :  * ForwardSyncRequest
    1206              :  *      Forward a file-fsync request from a backend to the checkpointer
    1207              :  *
    1208              :  * Whenever a backend is compelled to write directly to a relation
    1209              :  * (which should be seldom, if the background writer is getting its job done),
    1210              :  * the backend calls this routine to pass over knowledge that the relation
    1211              :  * is dirty and must be fsync'd before next checkpoint.  We also use this
    1212              :  * opportunity to count such writes for statistical purposes.
    1213              :  *
    1214              :  * To avoid holding the lock for longer than necessary, we normally write
    1215              :  * to the requests[] queue without checking for duplicates.  The checkpointer
    1216              :  * will have to eliminate dups internally anyway.  However, if we discover
    1217              :  * that the queue is full, we make a pass over the entire queue to compact
    1218              :  * it.  This is somewhat expensive, but the alternative is for the backend
    1219              :  * to perform its own fsync, which is far more expensive in practice.  It
    1220              :  * is theoretically possible a backend fsync might still be necessary, if
    1221              :  * the queue is full and contains no duplicate entries.  In that case, we
    1222              :  * let the backend know by returning false.
    1223              :  */
    1224              : bool
    1225      1357651 : ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
    1226              : {
    1227              :     CheckpointerRequest *request;
    1228              :     bool        too_full;
    1229              :     int         insert_pos;
    1230              : 
    1231      1357651 :     if (!IsUnderPostmaster)
    1232            0 :         return false;           /* probably shouldn't even get here */
    1233              : 
    1234      1357651 :     if (AmCheckpointerProcess())
    1235            0 :         elog(ERROR, "ForwardSyncRequest must not be called in checkpointer");
    1236              : 
    1237      1357651 :     LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
    1238              : 
    1239              :     /*
    1240              :      * If the checkpointer isn't running or the request queue is full, the
    1241              :      * backend will have to perform its own fsync request.  But before forcing
    1242              :      * that to happen, we can try to compact the request queue.
    1243              :      */
    1244      1357651 :     if (CheckpointerShmem->checkpointer_pid == 0 ||
    1245      1357109 :         (CheckpointerShmem->num_requests >= CheckpointerShmem->max_requests &&
    1246          269 :          !CompactCheckpointerRequestQueue()))
    1247              :     {
    1248          601 :         LWLockRelease(CheckpointerCommLock);
    1249          601 :         return false;
    1250              :     }
    1251              : 
    1252              :     /* OK, insert request */
    1253      1357050 :     insert_pos = CheckpointerShmem->tail;
    1254      1357050 :     request = &CheckpointerShmem->requests[insert_pos];
    1255      1357050 :     request->ftag = *ftag;
    1256      1357050 :     request->type = type;
    1257              : 
    1258      1357050 :     CheckpointerShmem->tail = (CheckpointerShmem->tail + 1) % CheckpointerShmem->max_requests;
    1259      1357050 :     CheckpointerShmem->num_requests++;
    1260              : 
    1261              :     /* If queue is more than half full, nudge the checkpointer to empty it */
    1262      1357050 :     too_full = (CheckpointerShmem->num_requests >=
    1263      1357050 :                 CheckpointerShmem->max_requests / 2);
    1264              : 
    1265      1357050 :     LWLockRelease(CheckpointerCommLock);
    1266              : 
    1267              :     /* ... but not till after we release the lock */
    1268      1357050 :     if (too_full)
    1269              :     {
    1270        37830 :         volatile PROC_HDR *procglobal = ProcGlobal;
    1271        37830 :         ProcNumber  checkpointerProc = procglobal->checkpointerProc;
    1272              : 
    1273        37830 :         if (checkpointerProc != INVALID_PROC_NUMBER)
    1274        37830 :             SetLatch(&GetPGProcByNumber(checkpointerProc)->procLatch);
    1275              :     }
    1276              : 
    1277      1357050 :     return true;
    1278              : }
    1279              : 
    1280              : /*
    1281              :  * CompactCheckpointerRequestQueue
    1282              :  *      Remove duplicates from the request queue to avoid backend fsyncs.
    1283              :  *      Returns "true" if any entries were removed.
    1284              :  *
    1285              :  * Although a full fsync request queue is not common, it can lead to severe
    1286              :  * performance problems when it does happen.  So far, this situation has
    1287              :  * only been observed to occur when the system is under heavy write load,
    1288              :  * and especially during the "sync" phase of a checkpoint.  Without this
    1289              :  * logic, each backend begins doing an fsync for every block written, which
    1290              :  * gets very expensive and can slow down the whole system.
    1291              :  *
    1292              :  * Trying to do this every time the queue is full could lose if there
    1293              :  * aren't any removable entries.  But that should be vanishingly rare in
    1294              :  * practice: there's one queue entry per shared buffer.
    1295              :  */
    1296              : static bool
    1297          269 : CompactCheckpointerRequestQueue(void)
    1298              : {
    1299              :     struct CheckpointerSlotMapping
    1300              :     {
    1301              :         CheckpointerRequest request;
    1302              :         int         ring_idx;
    1303              :     };
    1304              : 
    1305              :     int         n;
    1306          269 :     int         num_skipped = 0;
    1307              :     int         head;
    1308              :     int         max_requests;
    1309              :     int         num_requests;
    1310              :     int         read_idx,
    1311              :                 write_idx;
    1312              :     HASHCTL     ctl;
    1313              :     HTAB       *htab;
    1314              :     bool       *skip_slot;
    1315              : 
    1316              :     /* must hold CheckpointerCommLock in exclusive mode */
    1317              :     Assert(LWLockHeldByMe(CheckpointerCommLock));
    1318              : 
    1319              :     /* Avoid memory allocations in a critical section. */
    1320          269 :     if (CritSectionCount > 0)
    1321            0 :         return false;
    1322              : 
    1323          269 :     max_requests = CheckpointerShmem->max_requests;
    1324          269 :     num_requests = CheckpointerShmem->num_requests;
    1325              : 
    1326              :     /* Initialize skip_slot array */
    1327          269 :     skip_slot = palloc0_array(bool, max_requests);
    1328              : 
    1329          269 :     head = CheckpointerShmem->head;
    1330              : 
    1331              :     /* Initialize temporary hash table */
    1332          269 :     ctl.keysize = sizeof(CheckpointerRequest);
    1333          269 :     ctl.entrysize = sizeof(struct CheckpointerSlotMapping);
    1334          269 :     ctl.hcxt = CurrentMemoryContext;
    1335              : 
    1336          269 :     htab = hash_create("CompactCheckpointerRequestQueue",
    1337          269 :                        CheckpointerShmem->num_requests,
    1338              :                        &ctl,
    1339              :                        HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
    1340              : 
    1341              :     /*
    1342              :      * The basic idea here is that a request can be skipped if it's followed
    1343              :      * by a later, identical request.  It might seem more sensible to work
    1344              :      * backwards from the end of the queue and check whether a request is
    1345              :      * *preceded* by an earlier, identical request, in the hopes of doing less
    1346              :      * copying.  But that might change the semantics, if there's an
    1347              :      * intervening SYNC_FORGET_REQUEST or SYNC_FILTER_REQUEST, so we do it
    1348              :      * this way.  It would be possible to be even smarter if we made the code
    1349              :      * below understand the specific semantics of such requests (it could blow
    1350              :      * away preceding entries that would end up being canceled anyhow), but
    1351              :      * it's not clear that the extra complexity would buy us anything.
    1352              :      */
    1353          269 :     read_idx = head;
    1354        23725 :     for (n = 0; n < num_requests; n++)
    1355              :     {
    1356              :         CheckpointerRequest *request;
    1357              :         struct CheckpointerSlotMapping *slotmap;
    1358              :         bool        found;
    1359              : 
    1360              :         /*
    1361              :          * We use the request struct directly as a hashtable key.  This
    1362              :          * assumes that any padding bytes in the structs are consistently the
    1363              :          * same, which should be okay because we zeroed them in
    1364              :          * CheckpointerShmemInit.  Note also that RelFileLocator had better
    1365              :          * contain no pad bytes.
    1366              :          */
    1367        23456 :         request = &CheckpointerShmem->requests[read_idx];
    1368        23456 :         slotmap = hash_search(htab, request, HASH_ENTER, &found);
    1369        23456 :         if (found)
    1370              :         {
    1371              :             /* Duplicate, so mark the previous occurrence as skippable */
    1372        11222 :             skip_slot[slotmap->ring_idx] = true;
    1373        11222 :             num_skipped++;
    1374              :         }
    1375              :         /* Remember slot containing latest occurrence of this request value */
    1376        23456 :         slotmap->ring_idx = read_idx;
    1377              : 
    1378              :         /* Move to the next request in the ring buffer */
    1379        23456 :         read_idx = (read_idx + 1) % max_requests;
    1380              :     }
    1381              : 
    1382              :     /* Done with the hash table. */
    1383          269 :     hash_destroy(htab);
    1384              : 
    1385              :     /* If no duplicates, we're out of luck. */
    1386          269 :     if (!num_skipped)
    1387              :     {
    1388           59 :         pfree(skip_slot);
    1389           59 :         return false;
    1390              :     }
    1391              : 
    1392              :     /* We found some duplicates; remove them. */
    1393          210 :     read_idx = write_idx = head;
    1394        19474 :     for (n = 0; n < num_requests; n++)
    1395              :     {
    1396              :         /* If this slot is NOT skipped, keep it */
    1397        19264 :         if (!skip_slot[read_idx])
    1398              :         {
    1399              :             /* If the read and write positions are different, copy the request */
    1400         8042 :             if (write_idx != read_idx)
    1401         6869 :                 CheckpointerShmem->requests[write_idx] =
    1402         6869 :                     CheckpointerShmem->requests[read_idx];
    1403              : 
    1404              :             /* Advance the write position */
    1405         8042 :             write_idx = (write_idx + 1) % max_requests;
    1406              :         }
    1407              : 
    1408        19264 :         read_idx = (read_idx + 1) % max_requests;
    1409              :     }
    1410              : 
    1411              :     /*
    1412              :      * Update ring buffer state: head remains the same, tail moves, count
    1413              :      * decreases
    1414              :      */
    1415          210 :     CheckpointerShmem->tail = write_idx;
    1416          210 :     CheckpointerShmem->num_requests -= num_skipped;
    1417              : 
    1418          210 :     ereport(DEBUG1,
    1419              :             (errmsg_internal("compacted fsync request queue from %d entries to %d entries",
    1420              :                              num_requests, CheckpointerShmem->num_requests)));
    1421              : 
    1422              :     /* Cleanup. */
    1423          210 :     pfree(skip_slot);
    1424          210 :     return true;
    1425              : }
    1426              : 
    1427              : /*
    1428              :  * AbsorbSyncRequests
    1429              :  *      Retrieve queued sync requests and pass them to sync mechanism.
    1430              :  *
    1431              :  * This is exported because it must be called during CreateCheckPoint;
    1432              :  * we have to be sure we have accepted all pending requests just before
    1433              :  * we start fsync'ing.  Since CreateCheckPoint sometimes runs in
    1434              :  * non-checkpointer processes, do nothing if not checkpointer.
    1435              :  */
    1436              : void
    1437        19627 : AbsorbSyncRequests(void)
    1438              : {
    1439        19627 :     CheckpointerRequest *requests = NULL;
    1440              :     CheckpointerRequest *request;
    1441              :     int         n,
    1442              :                 i;
    1443              :     bool        loop;
    1444              : 
    1445        19627 :     if (!AmCheckpointerProcess())
    1446          652 :         return;
    1447              : 
    1448              :     do
    1449              :     {
    1450        18976 :         LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
    1451              : 
    1452              :         /*---
    1453              :          * We try to avoid holding the lock for a long time by:
    1454              :          * 1. Copying the request array and processing the requests after
    1455              :          *    releasing the lock;
    1456              :          * 2. Processing not the whole queue, but only batches of
    1457              :          *    CKPT_REQ_BATCH_SIZE at once.
    1458              :          *
    1459              :          * Once we have cleared the requests from shared memory, we must
    1460              :          * PANIC if we then fail to absorb them (e.g., because our hashtable
    1461              :          * runs out of memory).  This is because the system cannot run safely
    1462              :          * if we are unable to fsync what we have been told to fsync.
    1463              :          * Fortunately, the hashtable is so small that the problem is quite
    1464              :          * unlikely to arise in practice.
    1465              :          *
    1466              :          * Note: The maximum possible size of a ring buffer is
    1467              :          * MAX_CHECKPOINT_REQUESTS entries, which fit into a maximum palloc
    1468              :          * allocation size of 1Gb.  Our maximum batch size,
    1469              :          * CKPT_REQ_BATCH_SIZE, is even smaller.
    1470              :          */
    1471        18976 :         n = Min(CheckpointerShmem->num_requests, CKPT_REQ_BATCH_SIZE);
    1472        18976 :         if (n > 0)
    1473              :         {
    1474         9517 :             if (!requests)
    1475         9516 :                 requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest));
    1476              : 
    1477      1195416 :             for (i = 0; i < n; i++)
    1478              :             {
    1479      1185899 :                 requests[i] = CheckpointerShmem->requests[CheckpointerShmem->head];
    1480      1185899 :                 CheckpointerShmem->head = (CheckpointerShmem->head + 1) % CheckpointerShmem->max_requests;
    1481              :             }
    1482              : 
    1483         9517 :             CheckpointerShmem->num_requests -= n;
    1484              : 
    1485              :         }
    1486              : 
    1487        18976 :         START_CRIT_SECTION();
    1488              : 
    1489              :         /* Are there any requests in the queue? If so, keep going. */
    1490        18976 :         loop = CheckpointerShmem->num_requests != 0;
    1491              : 
    1492        18976 :         LWLockRelease(CheckpointerCommLock);
    1493              : 
    1494      1204875 :         for (request = requests; n > 0; request++, n--)
    1495      1185899 :             RememberSyncRequest(&request->ftag, request->type);
    1496              : 
    1497        18976 :         END_CRIT_SECTION();
    1498        18976 :     } while (loop);
    1499              : 
    1500        18975 :     if (requests)
    1501         9516 :         pfree(requests);
    1502              : }
    1503              : 
    1504              : /*
    1505              :  * Update any shared memory configurations based on config parameters
    1506              :  */
    1507              : static void
    1508          685 : UpdateSharedMemoryConfig(void)
    1509              : {
    1510              :     /* update global shmem state for sync rep */
    1511          685 :     SyncRepUpdateSyncStandbysDefined();
    1512              : 
    1513              :     /*
    1514              :      * If full_page_writes has been changed by SIGHUP, we update it in shared
    1515              :      * memory and write an XLOG_FPW_CHANGE record.
    1516              :      */
    1517          685 :     UpdateFullPageWrites();
    1518              : 
    1519          685 :     elog(DEBUG2, "checkpointer updated shared memory configuration values");
    1520          685 : }
    1521              : 
    1522              : /*
    1523              :  * FirstCallSinceLastCheckpoint allows a process to take an action once
    1524              :  * per checkpoint cycle by asynchronously checking for checkpoint completion.
    1525              :  */
    1526              : bool
    1527        14645 : FirstCallSinceLastCheckpoint(void)
    1528              : {
    1529              :     static int  ckpt_done = 0;
    1530              :     int         new_done;
    1531        14645 :     bool        FirstCall = false;
    1532              : 
    1533        14645 :     SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
    1534        14645 :     new_done = CheckpointerShmem->ckpt_done;
    1535        14645 :     SpinLockRelease(&CheckpointerShmem->ckpt_lck);
    1536              : 
    1537        14645 :     if (new_done != ckpt_done)
    1538          590 :         FirstCall = true;
    1539              : 
    1540        14645 :     ckpt_done = new_done;
    1541              : 
    1542        14645 :     return FirstCall;
    1543              : }
    1544              : 
    1545              : /*
    1546              :  * Wake up the checkpointer process.
    1547              :  */
    1548              : void
    1549          981 : WakeupCheckpointer(void)
    1550              : {
    1551          981 :     volatile PROC_HDR *procglobal = ProcGlobal;
    1552          981 :     ProcNumber  checkpointerProc = procglobal->checkpointerProc;
    1553              : 
    1554          981 :     if (checkpointerProc != INVALID_PROC_NUMBER)
    1555          665 :         SetLatch(&GetPGProcByNumber(checkpointerProc)->procLatch);
    1556          981 : }
        

Generated by: LCOV version 2.0-1