LCOV - code coverage report
Current view: top level - src/backend/postmaster - checkpointer.c (source / functions)
Test:         PostgreSQL 19devel
Date:         2026-02-07 03:17:59

Coverage summary:      Hit     Total    Coverage
  Lines:               353       406      86.9 %
  Functions:            17        17     100.0 %

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * checkpointer.c
       4             :  *
       5             :  * The checkpointer is new as of Postgres 9.2.  It handles all checkpoints.
       6             :  * Checkpoints are automatically dispatched after a certain amount of time has
        7             :  * elapsed since the last one, and the checkpointer can be signaled to perform
        8             :  * requested checkpoints as well.  (The GUC parameter that mandates a
        9             :  * checkpoint every so many WAL segments is implemented by having backends
       10             :  * signal when they fill WAL segments; the checkpointer itself doesn't watch
       11             :  * for the condition.)
      12             :  *
       13             :  * The normal termination sequence is that the checkpointer is instructed to
       14             :  * execute the shutdown checkpoint by SIGINT.  After that, the checkpointer
       15             :  * waits to be terminated via SIGUSR2, which instructs it to exit(0).
      16             :  * All backends must be stopped before SIGINT or SIGUSR2 is issued!
      17             :  *
      18             :  * Emergency termination is by SIGQUIT; like any backend, the checkpointer
      19             :  * will simply abort and exit on SIGQUIT.
      20             :  *
      21             :  * If the checkpointer exits unexpectedly, the postmaster treats that the same
      22             :  * as a backend crash: shared memory may be corrupted, so remaining backends
      23             :  * should be killed by SIGQUIT and then a recovery cycle started.  (Even if
      24             :  * shared memory isn't corrupted, we have lost information about which
      25             :  * files need to be fsync'd for the next checkpoint, and so a system
      26             :  * restart needs to be forced.)
      27             :  *
      28             :  *
      29             :  * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
      30             :  *
      31             :  *
      32             :  * IDENTIFICATION
      33             :  *    src/backend/postmaster/checkpointer.c
      34             :  *
      35             :  *-------------------------------------------------------------------------
      36             :  */
      37             : #include "postgres.h"
      38             : 
      39             : #include <sys/time.h>
      40             : #include <time.h>
      41             : 
      42             : #include "access/xlog.h"
      43             : #include "access/xlog_internal.h"
      44             : #include "access/xlogrecovery.h"
      45             : #include "catalog/pg_authid.h"
      46             : #include "commands/defrem.h"
      47             : #include "libpq/pqsignal.h"
      48             : #include "miscadmin.h"
      49             : #include "pgstat.h"
      50             : #include "postmaster/auxprocess.h"
      51             : #include "postmaster/bgwriter.h"
      52             : #include "postmaster/interrupt.h"
      53             : #include "replication/syncrep.h"
      54             : #include "storage/aio_subsys.h"
      55             : #include "storage/bufmgr.h"
      56             : #include "storage/condition_variable.h"
      57             : #include "storage/fd.h"
      58             : #include "storage/ipc.h"
      59             : #include "storage/lwlock.h"
      60             : #include "storage/pmsignal.h"
      61             : #include "storage/proc.h"
      62             : #include "storage/procsignal.h"
      63             : #include "storage/shmem.h"
      64             : #include "storage/smgr.h"
      65             : #include "storage/spin.h"
      66             : #include "utils/acl.h"
      67             : #include "utils/guc.h"
      68             : #include "utils/memutils.h"
      69             : #include "utils/resowner.h"
      70             : 
      71             : 
      72             : /*----------
      73             :  * Shared memory area for communication between checkpointer and backends
      74             :  *
      75             :  * The ckpt counters allow backends to watch for completion of a checkpoint
      76             :  * request they send.  Here's how it works:
      77             :  *  * At start of a checkpoint, checkpointer reads (and clears) the request
      78             :  *    flags and increments ckpt_started, while holding ckpt_lck.
      79             :  *  * On completion of a checkpoint, checkpointer sets ckpt_done to
      80             :  *    equal ckpt_started.
      81             :  *  * On failure of a checkpoint, checkpointer increments ckpt_failed
      82             :  *    and sets ckpt_done to equal ckpt_started.
      83             :  *
      84             :  * The algorithm for backends is:
      85             :  *  1. Record current values of ckpt_failed and ckpt_started, and
      86             :  *     set request flags, while holding ckpt_lck.
      87             :  *  2. Send signal to request checkpoint.
      88             :  *  3. Sleep until ckpt_started changes.  Now you know a checkpoint has
      89             :  *     begun since you started this algorithm (although *not* that it was
      90             :  *     specifically initiated by your signal), and that it is using your flags.
      91             :  *  4. Record new value of ckpt_started.
      92             :  *  5. Sleep until ckpt_done >= saved value of ckpt_started.  (Use modulo
      93             :  *     arithmetic here in case counters wrap around.)  Now you know a
      94             :  *     checkpoint has started and completed, but not whether it was
      95             :  *     successful.
      96             :  *  6. If ckpt_failed is different from the originally saved value,
      97             :  *     assume request failed; otherwise it was definitely successful.
      98             :  *
      99             :  * ckpt_flags holds the OR of the checkpoint request flags sent by all
     100             :  * requesting backends since the last checkpoint start.  The flags are
     101             :  * chosen so that OR'ing is the correct way to combine multiple requests.
     102             :  *
     103             :  * The requests array holds fsync requests sent by backends and not yet
     104             :  * absorbed by the checkpointer.
     105             :  *
      106             :  * Unlike the checkpoint fields, the request-related fields are protected by
     107             :  * CheckpointerCommLock.
     108             :  *----------
     109             :  */
     110             : typedef struct
     111             : {
     112             :     SyncRequestType type;       /* request type */
     113             :     FileTag     ftag;           /* file identifier */
     114             : } CheckpointerRequest;
     115             : 
     116             : typedef struct
     117             : {
     118             :     pid_t       checkpointer_pid;   /* PID (0 if not started) */
     119             : 
     120             :     slock_t     ckpt_lck;       /* protects all the ckpt_* fields */
     121             : 
     122             :     int         ckpt_started;   /* advances when checkpoint starts */
     123             :     int         ckpt_done;      /* advances when checkpoint done */
     124             :     int         ckpt_failed;    /* advances when checkpoint fails */
     125             : 
     126             :     int         ckpt_flags;     /* checkpoint flags, as defined in xlog.h */
     127             : 
     128             :     ConditionVariable start_cv; /* signaled when ckpt_started advances */
     129             :     ConditionVariable done_cv;  /* signaled when ckpt_done advances */
     130             : 
     131             :     int         num_requests;   /* current # of requests */
     132             :     int         max_requests;   /* allocated array size */
     133             : 
     134             :     int         head;           /* Index of the first request in the ring
     135             :                                  * buffer */
     136             :     int         tail;           /* Index of the last request in the ring
     137             :                                  * buffer */
     138             : 
     139             :     /* The ring buffer of pending checkpointer requests */
     140             :     CheckpointerRequest requests[FLEXIBLE_ARRAY_MEMBER];
     141             : } CheckpointerShmemStruct;
     142             : 
     143             : static CheckpointerShmemStruct *CheckpointerShmem;
     144             : 
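
The backend-side counterpart of the protocol described above is RequestCheckpoint(),
further down in this file (not shown in this excerpt).  What follows is only an
editor's sketch of how a backend would use the counters and condition variables
declared here; WakeCheckpointer() is a hypothetical stand-in for setting the
checkpointer's latch via ProcGlobal->checkpointerProc, error handling is omitted,
and the flag and wait-event names are assumed to come from the usual headers.

extern void WakeCheckpointer(void); /* hypothetical helper, see note above */

static void
RequestCheckpointSketch(int flags)
{
    int         old_started,
                old_failed,
                new_started,
                new_done,
                new_failed;

    /* Steps 1-2: record the counters, OR in our flags, wake the checkpointer */
    SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
    old_failed = CheckpointerShmem->ckpt_failed;
    old_started = CheckpointerShmem->ckpt_started;
    CheckpointerShmem->ckpt_flags |= (flags | CHECKPOINT_REQUESTED);
    SpinLockRelease(&CheckpointerShmem->ckpt_lck);

    WakeCheckpointer();

    /* Steps 3-4: wait for ckpt_started to advance past the value we saw */
    ConditionVariablePrepareToSleep(&CheckpointerShmem->start_cv);
    for (;;)
    {
        SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
        new_started = CheckpointerShmem->ckpt_started;
        SpinLockRelease(&CheckpointerShmem->ckpt_lck);
        if (new_started != old_started)
            break;
        ConditionVariableSleep(&CheckpointerShmem->start_cv,
                               WAIT_EVENT_CHECKPOINT_START);
    }
    ConditionVariableCancelSleep();

    /* Step 5: wait for ckpt_done to catch up; signed difference is wrap-safe */
    ConditionVariablePrepareToSleep(&CheckpointerShmem->done_cv);
    for (;;)
    {
        SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
        new_done = CheckpointerShmem->ckpt_done;
        new_failed = CheckpointerShmem->ckpt_failed;
        SpinLockRelease(&CheckpointerShmem->ckpt_lck);
        if (new_done - new_started >= 0)
            break;
        ConditionVariableSleep(&CheckpointerShmem->done_cv,
                               WAIT_EVENT_CHECKPOINT_DONE);
    }
    ConditionVariableCancelSleep();

    /* Step 6: failure is only probable, success is certain */
    if (new_failed != old_failed)
        ereport(ERROR,
                (errmsg("checkpoint request failed")));
}
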
     145             : /* interval for calling AbsorbSyncRequests in CheckpointWriteDelay */
     146             : #define WRITES_PER_ABSORB       1000
     147             : 
     148             : /* Maximum number of checkpointer requests to process in one batch */
     149             : #define CKPT_REQ_BATCH_SIZE 10000
     150             : 
     151             : /* Max number of requests the checkpointer request queue can hold */
     152             : #define MAX_CHECKPOINT_REQUESTS 10000000
     153             : 
     154             : /*
     155             :  * GUC parameters
     156             :  */
     157             : int         CheckPointTimeout = 300;
     158             : int         CheckPointWarning = 30;
     159             : double      CheckPointCompletionTarget = 0.9;
     160             : 
     161             : /*
     162             :  * Private state
     163             :  */
     164             : static bool ckpt_active = false;
     165             : static volatile sig_atomic_t ShutdownXLOGPending = false;
     166             : 
     167             : /* these values are valid when ckpt_active is true: */
     168             : static pg_time_t ckpt_start_time;
     169             : static XLogRecPtr ckpt_start_recptr;
     170             : static double ckpt_cached_elapsed;
     171             : 
     172             : static pg_time_t last_checkpoint_time;
     173             : static pg_time_t last_xlog_switch_time;
     174             : 
     175             : /* Prototypes for private functions */
     176             : 
     177             : static void ProcessCheckpointerInterrupts(void);
     178             : static void CheckArchiveTimeout(void);
     179             : static bool IsCheckpointOnSchedule(double progress);
     180             : static bool FastCheckpointRequested(void);
     181             : static bool CompactCheckpointerRequestQueue(void);
     182             : static void UpdateSharedMemoryConfig(void);
     183             : 
     184             : /* Signal handlers */
     185             : static void ReqShutdownXLOG(SIGNAL_ARGS);
     186             : 
     187             : 
     188             : /*
     189             :  * Main entry point for checkpointer process
     190             :  *
     191             :  * This is invoked from AuxiliaryProcessMain, which has already created the
     192             :  * basic execution environment, but not enabled signals yet.
     193             :  */
     194             : void
     195        1142 : CheckpointerMain(const void *startup_data, size_t startup_data_len)
     196             : {
     197             :     sigjmp_buf  local_sigjmp_buf;
     198             :     MemoryContext checkpointer_context;
     199             : 
     200             :     Assert(startup_data_len == 0);
     201             : 
     202        1142 :     AuxiliaryProcessMainCommon();
     203             : 
     204        1142 :     CheckpointerShmem->checkpointer_pid = MyProcPid;
     205             : 
     206             :     /*
     207             :      * Properly accept or ignore signals the postmaster might send us
     208             :      *
     209             :      * Note: we deliberately ignore SIGTERM, because during a standard Unix
     210             :      * system shutdown cycle, init will SIGTERM all processes at once.  We
     211             :      * want to wait for the backends to exit, whereupon the postmaster will
     212             :      * tell us it's okay to shut down (via SIGUSR2).
     213             :      */
     214        1142 :     pqsignal(SIGHUP, SignalHandlerForConfigReload);
     215        1142 :     pqsignal(SIGINT, ReqShutdownXLOG);
     216        1142 :     pqsignal(SIGTERM, SIG_IGN); /* ignore SIGTERM */
     217             :     /* SIGQUIT handler was already set up by InitPostmasterChild */
     218        1142 :     pqsignal(SIGALRM, SIG_IGN);
     219        1142 :     pqsignal(SIGPIPE, SIG_IGN);
     220        1142 :     pqsignal(SIGUSR1, procsignal_sigusr1_handler);
     221        1142 :     pqsignal(SIGUSR2, SignalHandlerForShutdownRequest);
     222             : 
     223             :     /*
     224             :      * Reset some signals that are accepted by postmaster but not here
     225             :      */
     226        1142 :     pqsignal(SIGCHLD, SIG_DFL);
     227             : 
     228             :     /*
      229             :      * Initialize so that the first time-driven event happens at the correct time.
     230             :      */
     231        1142 :     last_checkpoint_time = last_xlog_switch_time = (pg_time_t) time(NULL);
     232             : 
     233             :     /*
      234             :      * Write out stats after shutdown.  This needs to be called by exactly one
      235             :      * process during a normal shutdown, and since the checkpointer is shut
      236             :      * down very late, it is the natural place to do so.
      237             :      *
      238             :      * While processes such as walsenders are still active after the shutdown
      239             :      * checkpoint has been written (and thus could produce more stats), the
      240             :      * checkpointer stays around after that point too.  The postmaster will
      241             :      * only signal the checkpointer to exit after all processes that could
      242             :      * emit stats have been shut down.
     243             :      */
     244        1142 :     before_shmem_exit(pgstat_before_server_shutdown, 0);
     245             : 
     246             :     /*
     247             :      * Create a memory context that we will do all our work in.  We do this so
     248             :      * that we can reset the context during error recovery and thereby avoid
     249             :      * possible memory leaks.  Formerly this code just ran in
     250             :      * TopMemoryContext, but resetting that would be a really bad idea.
     251             :      */
     252        1142 :     checkpointer_context = AllocSetContextCreate(TopMemoryContext,
     253             :                                                  "Checkpointer",
     254             :                                                  ALLOCSET_DEFAULT_SIZES);
     255        1142 :     MemoryContextSwitchTo(checkpointer_context);
     256             : 
     257             :     /*
     258             :      * If an exception is encountered, processing resumes here.
     259             :      *
     260             :      * You might wonder why this isn't coded as an infinite loop around a
     261             :      * PG_TRY construct.  The reason is that this is the bottom of the
     262             :      * exception stack, and so with PG_TRY there would be no exception handler
     263             :      * in force at all during the CATCH part.  By leaving the outermost setjmp
     264             :      * always active, we have at least some chance of recovering from an error
     265             :      * during error recovery.  (If we get into an infinite loop thereby, it
     266             :      * will soon be stopped by overflow of elog.c's internal state stack.)
     267             :      *
     268             :      * Note that we use sigsetjmp(..., 1), so that the prevailing signal mask
     269             :      * (to wit, BlockSig) will be restored when longjmp'ing to here.  Thus,
     270             :      * signals other than SIGQUIT will be blocked until we complete error
     271             :      * recovery.  It might seem that this policy makes the HOLD_INTERRUPTS()
     272             :      * call redundant, but it is not since InterruptPending might be set
     273             :      * already.
     274             :      */
     275        1142 :     if (sigsetjmp(local_sigjmp_buf, 1) != 0)
     276             :     {
     277             :         /* Since not using PG_TRY, must reset error stack by hand */
     278           0 :         error_context_stack = NULL;
     279             : 
     280             :         /* Prevent interrupts while cleaning up */
     281           0 :         HOLD_INTERRUPTS();
     282             : 
     283             :         /* Report the error to the server log */
     284           0 :         EmitErrorReport();
     285             : 
     286             :         /*
     287             :          * These operations are really just a minimal subset of
     288             :          * AbortTransaction().  We don't have very many resources to worry
     289             :          * about in checkpointer, but we do have LWLocks, buffers, and temp
     290             :          * files.
     291             :          */
     292           0 :         LWLockReleaseAll();
     293           0 :         ConditionVariableCancelSleep();
     294           0 :         pgstat_report_wait_end();
     295           0 :         pgaio_error_cleanup();
     296           0 :         UnlockBuffers();
     297           0 :         ReleaseAuxProcessResources(false);
     298           0 :         AtEOXact_Buffers(false);
     299           0 :         AtEOXact_SMgr();
     300           0 :         AtEOXact_Files(false);
     301           0 :         AtEOXact_HashTables(false);
     302             : 
     303             :         /* Warn any waiting backends that the checkpoint failed. */
     304           0 :         if (ckpt_active)
     305             :         {
     306           0 :             SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
     307           0 :             CheckpointerShmem->ckpt_failed++;
     308           0 :             CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
     309           0 :             SpinLockRelease(&CheckpointerShmem->ckpt_lck);
     310             : 
     311           0 :             ConditionVariableBroadcast(&CheckpointerShmem->done_cv);
     312             : 
     313           0 :             ckpt_active = false;
     314             :         }
     315             : 
     316             :         /*
     317             :          * Now return to normal top-level context and clear ErrorContext for
     318             :          * next time.
     319             :          */
     320           0 :         MemoryContextSwitchTo(checkpointer_context);
     321           0 :         FlushErrorState();
     322             : 
     323             :         /* Flush any leaked data in the top-level context */
     324           0 :         MemoryContextReset(checkpointer_context);
     325             : 
     326             :         /* Now we can allow interrupts again */
     327           0 :         RESUME_INTERRUPTS();
     328             : 
     329             :         /*
     330             :          * Sleep at least 1 second after any error.  A write error is likely
     331             :          * to be repeated, and we don't want to be filling the error logs as
     332             :          * fast as we can.
     333             :          */
     334           0 :         pg_usleep(1000000L);
     335             :     }
     336             : 
     337             :     /* We can now handle ereport(ERROR) */
     338        1142 :     PG_exception_stack = &local_sigjmp_buf;
     339             : 
     340             :     /*
     341             :      * Unblock signals (they were blocked when the postmaster forked us)
     342             :      */
     343        1142 :     sigprocmask(SIG_SETMASK, &UnBlockSig, NULL);
     344             : 
     345             :     /*
     346             :      * Ensure all shared memory values are set correctly for the config. Doing
     347             :      * this here ensures no race conditions from other concurrent updaters.
     348             :      */
     349        1142 :     UpdateSharedMemoryConfig();
     350             : 
     351             :     /*
     352             :      * Advertise our proc number that backends can use to wake us up while
     353             :      * we're sleeping.
     354             :      */
     355        1142 :     ProcGlobal->checkpointerProc = MyProcNumber;
     356             : 
     357             :     /*
     358             :      * Loop until we've been asked to write the shutdown checkpoint or
     359             :      * terminate.
     360             :      */
     361             :     for (;;)
     362        8752 :     {
     363        9894 :         bool        do_checkpoint = false;
     364        9894 :         int         flags = 0;
     365             :         pg_time_t   now;
     366             :         int         elapsed_secs;
     367             :         int         cur_timeout;
     368        9894 :         bool        chkpt_or_rstpt_requested = false;
     369        9894 :         bool        chkpt_or_rstpt_timed = false;
     370             : 
     371             :         /* Clear any already-pending wakeups */
     372        9894 :         ResetLatch(MyLatch);
     373             : 
     374             :         /*
     375             :          * Process any requests or signals received recently.
     376             :          */
     377        9894 :         AbsorbSyncRequests();
     378             : 
     379        9894 :         ProcessCheckpointerInterrupts();
     380        9894 :         if (ShutdownXLOGPending || ShutdownRequestPending)
     381             :             break;
     382             : 
     383             :         /*
     384             :          * Detect a pending checkpoint request by checking whether the flags
     385             :          * word in shared memory is nonzero.  We shouldn't need to acquire the
     386             :          * ckpt_lck for this.
     387             :          */
     388        8778 :         if (((volatile CheckpointerShmemStruct *) CheckpointerShmem)->ckpt_flags)
     389             :         {
     390        2610 :             do_checkpoint = true;
     391        2610 :             chkpt_or_rstpt_requested = true;
     392             :         }
     393             : 
     394             :         /*
     395             :          * Force a checkpoint if too much time has elapsed since the last one.
     396             :          * Note that we count a timed checkpoint in stats only when this
     397             :          * occurs without an external request, but we set the CAUSE_TIME flag
     398             :          * bit even if there is also an external request.
     399             :          */
     400        8778 :         now = (pg_time_t) time(NULL);
     401        8778 :         elapsed_secs = now - last_checkpoint_time;
     402        8778 :         if (elapsed_secs >= CheckPointTimeout)
     403             :         {
     404           2 :             if (!do_checkpoint)
     405           2 :                 chkpt_or_rstpt_timed = true;
     406           2 :             do_checkpoint = true;
     407           2 :             flags |= CHECKPOINT_CAUSE_TIME;
     408             :         }
     409             : 
     410             :         /*
     411             :          * Do a checkpoint if requested.
     412             :          */
     413        8778 :         if (do_checkpoint)
     414             :         {
     415        2612 :             bool        ckpt_performed = false;
     416             :             bool        do_restartpoint;
     417             : 
     418             :             /* Check if we should perform a checkpoint or a restartpoint. */
     419        2612 :             do_restartpoint = RecoveryInProgress();
     420             : 
     421             :             /*
     422             :              * Atomically fetch the request flags to figure out what kind of a
     423             :              * checkpoint we should perform, and increase the started-counter
     424             :              * to acknowledge that we've started a new checkpoint.
     425             :              */
     426        2612 :             SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
     427        2612 :             flags |= CheckpointerShmem->ckpt_flags;
     428        2612 :             CheckpointerShmem->ckpt_flags = 0;
     429        2612 :             CheckpointerShmem->ckpt_started++;
     430        2612 :             SpinLockRelease(&CheckpointerShmem->ckpt_lck);
     431             : 
     432        2612 :             ConditionVariableBroadcast(&CheckpointerShmem->start_cv);
     433             : 
     434             :             /*
     435             :              * The end-of-recovery checkpoint is a real checkpoint that's
     436             :              * performed while we're still in recovery.
     437             :              */
     438        2612 :             if (flags & CHECKPOINT_END_OF_RECOVERY)
     439          40 :                 do_restartpoint = false;
     440             : 
     441        2612 :             if (chkpt_or_rstpt_timed)
     442             :             {
     443           2 :                 chkpt_or_rstpt_timed = false;
     444           2 :                 if (do_restartpoint)
     445           0 :                     PendingCheckpointerStats.restartpoints_timed++;
     446             :                 else
     447           2 :                     PendingCheckpointerStats.num_timed++;
     448             :             }
     449             : 
     450        2612 :             if (chkpt_or_rstpt_requested)
     451             :             {
     452        2610 :                 chkpt_or_rstpt_requested = false;
     453        2610 :                 if (do_restartpoint)
     454        1102 :                     PendingCheckpointerStats.restartpoints_requested++;
     455             :                 else
     456        1508 :                     PendingCheckpointerStats.num_requested++;
     457             :             }
     458             : 
     459             :             /*
     460             :              * We will warn if (a) too soon since last checkpoint (whatever
     461             :              * caused it) and (b) somebody set the CHECKPOINT_CAUSE_XLOG flag
     462             :              * since the last checkpoint start.  Note in particular that this
     463             :              * implementation will not generate warnings caused by
     464             :              * CheckPointTimeout < CheckPointWarning.
     465             :              */
     466        2612 :             if (!do_restartpoint &&
     467        1510 :                 (flags & CHECKPOINT_CAUSE_XLOG) &&
     468         390 :                 elapsed_secs < CheckPointWarning)
     469         390 :                 ereport(LOG,
     470             :                         (errmsg_plural("checkpoints are occurring too frequently (%d second apart)",
     471             :                                        "checkpoints are occurring too frequently (%d seconds apart)",
     472             :                                        elapsed_secs,
     473             :                                        elapsed_secs),
     474             :                          errhint("Consider increasing the configuration parameter \"%s\".", "max_wal_size")));
     475             : 
     476             :             /*
     477             :              * Initialize checkpointer-private variables used during
     478             :              * checkpoint.
     479             :              */
     480        2612 :             ckpt_active = true;
     481        2612 :             if (do_restartpoint)
     482        1102 :                 ckpt_start_recptr = GetXLogReplayRecPtr(NULL);
     483             :             else
     484        1510 :                 ckpt_start_recptr = GetInsertRecPtr();
     485        2612 :             ckpt_start_time = now;
     486        2612 :             ckpt_cached_elapsed = 0;
     487             : 
     488             :             /*
     489             :              * Do the checkpoint.
     490             :              */
     491        2612 :             if (!do_restartpoint)
     492        1510 :                 ckpt_performed = CreateCheckPoint(flags);
     493             :             else
     494        1102 :                 ckpt_performed = CreateRestartPoint(flags);
     495             : 
     496             :             /*
     497             :              * After any checkpoint, free all smgr objects.  Otherwise we
     498             :              * would never do so for dropped relations, as the checkpointer
     499             :              * does not process shared invalidation messages or call
     500             :              * AtEOXact_SMgr().
     501             :              */
     502        2612 :             smgrdestroyall();
     503             : 
     504             :             /*
     505             :              * Indicate checkpoint completion to any waiting backends.
     506             :              */
     507        2612 :             SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
     508        2612 :             CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
     509        2612 :             SpinLockRelease(&CheckpointerShmem->ckpt_lck);
     510             : 
     511        2612 :             ConditionVariableBroadcast(&CheckpointerShmem->done_cv);
     512             : 
     513        2612 :             if (!do_restartpoint)
     514             :             {
     515             :                 /*
     516             :                  * Note we record the checkpoint start time not end time as
     517             :                  * last_checkpoint_time.  This is so that time-driven
     518             :                  * checkpoints happen at a predictable spacing.
     519             :                  */
     520        1510 :                 last_checkpoint_time = now;
     521             : 
     522        1510 :                 if (ckpt_performed)
     523        1508 :                     PendingCheckpointerStats.num_performed++;
     524             :             }
     525             :             else
     526             :             {
     527        1102 :                 if (ckpt_performed)
     528             :                 {
     529             :                     /*
     530             :                      * The same as for checkpoint. Please see the
     531             :                      * corresponding comment.
     532             :                      */
     533         354 :                     last_checkpoint_time = now;
     534             : 
     535         354 :                     PendingCheckpointerStats.restartpoints_performed++;
     536             :                 }
     537             :                 else
     538             :                 {
     539             :                     /*
     540             :                      * We were not able to perform the restartpoint
      541             :                      * (checkpoints throw an ERROR in case of error), most
      542             :                      * likely because we have not received any new checkpoint
      543             :                      * WAL records since the last restartpoint.  Try again in
      544             :                      * 15 seconds.
     545             :                      */
     546         748 :                     last_checkpoint_time = now - CheckPointTimeout + 15;
     547             :                 }
     548             :             }
     549             : 
     550        2612 :             ckpt_active = false;
     551             : 
     552             :             /*
     553             :              * We may have received an interrupt during the checkpoint and the
     554             :              * latch might have been reset (e.g. in CheckpointWriteDelay).
     555             :              */
     556        2612 :             ProcessCheckpointerInterrupts();
     557        2612 :             if (ShutdownXLOGPending || ShutdownRequestPending)
     558             :                 break;
     559             :         }
     560             : 
     561             :         /*
     562             :          * Disable logical decoding if someone requested it. See comments atop
     563             :          * logicalctl.c.
     564             :          */
     565        8764 :         DisableLogicalDecodingIfNecessary();
     566             : 
     567             :         /* Check for archive_timeout and switch xlog files if necessary. */
     568        8764 :         CheckArchiveTimeout();
     569             : 
     570             :         /* Report pending statistics to the cumulative stats system */
     571        8764 :         pgstat_report_checkpointer();
     572        8764 :         pgstat_report_wal(true);
     573             : 
     574             :         /*
     575             :          * If any checkpoint flags have been set, redo the loop to handle the
     576             :          * checkpoint without sleeping.
     577             :          */
     578        8764 :         if (((volatile CheckpointerShmemStruct *) CheckpointerShmem)->ckpt_flags)
     579         444 :             continue;
     580             : 
     581             :         /*
     582             :          * Sleep until we are signaled or it's time for another checkpoint or
     583             :          * xlog file switch.
     584             :          */
     585        8320 :         now = (pg_time_t) time(NULL);
     586        8320 :         elapsed_secs = now - last_checkpoint_time;
     587        8320 :         if (elapsed_secs >= CheckPointTimeout)
     588           0 :             continue;           /* no sleep for us ... */
     589        8320 :         cur_timeout = CheckPointTimeout - elapsed_secs;
     590        8320 :         if (XLogArchiveTimeout > 0 && !RecoveryInProgress())
     591             :         {
     592           0 :             elapsed_secs = now - last_xlog_switch_time;
     593           0 :             if (elapsed_secs >= XLogArchiveTimeout)
     594           0 :                 continue;       /* no sleep for us ... */
     595           0 :             cur_timeout = Min(cur_timeout, XLogArchiveTimeout - elapsed_secs);
     596             :         }
     597             : 
     598        8320 :         (void) WaitLatch(MyLatch,
     599             :                          WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
     600             :                          cur_timeout * 1000L /* convert to ms */ ,
     601             :                          WAIT_EVENT_CHECKPOINTER_MAIN);
     602             :     }
     603             : 
     604             :     /*
     605             :      * From here on, elog(ERROR) should end with exit(1), not send control
     606             :      * back to the sigsetjmp block above.
     607             :      */
     608        1130 :     ExitOnAnyError = true;
     609             : 
     610        1130 :     if (ShutdownXLOGPending)
     611             :     {
     612             :         /*
     613             :          * Close down the database.
     614             :          *
     615             :          * Since ShutdownXLOG() creates restartpoint or checkpoint, and
     616             :          * updates the statistics, increment the checkpoint request and flush
     617             :          * out pending statistic.
     618             :          */
     619        1130 :         PendingCheckpointerStats.num_requested++;
     620        1130 :         ShutdownXLOG(0, 0);
     621        1130 :         pgstat_report_checkpointer();
     622        1130 :         pgstat_report_wal(true);
     623             : 
     624             :         /*
     625             :          * Tell postmaster that we're done.
     626             :          */
     627        1130 :         SendPostmasterSignal(PMSIGNAL_XLOG_IS_SHUTDOWN);
     628        1130 :         ShutdownXLOGPending = false;
     629             :     }
     630             : 
     631             :     /*
     632             :      * Wait until we're asked to shut down. By separating the writing of the
      633             :      * shutdown checkpoint from the checkpointer's exit, the checkpointer can
      634             :      * perform some should-be-as-late-as-possible work, like writing out stats.
     635             :      */
     636             :     for (;;)
     637             :     {
     638             :         /* Clear any already-pending wakeups */
     639        2258 :         ResetLatch(MyLatch);
     640             : 
     641        2258 :         ProcessCheckpointerInterrupts();
     642             : 
     643        2258 :         if (ShutdownRequestPending)
     644        1130 :             break;
     645             : 
     646        1128 :         (void) WaitLatch(MyLatch,
     647             :                          WL_LATCH_SET | WL_EXIT_ON_PM_DEATH,
     648             :                          0,
     649             :                          WAIT_EVENT_CHECKPOINTER_SHUTDOWN);
     650             :     }
     651             : 
     652             :     /* Normal exit from the checkpointer is here */
     653        1130 :     proc_exit(0);               /* done */
     654             : }
     655             : 
     656             : /*
     657             :  * Process any new interrupts.
     658             :  */
     659             : static void
     660       14764 : ProcessCheckpointerInterrupts(void)
     661             : {
     662       14764 :     if (ProcSignalBarrierPending)
     663         586 :         ProcessProcSignalBarrier();
     664             : 
     665       14764 :     if (ConfigReloadPending)
     666             :     {
     667         134 :         ConfigReloadPending = false;
     668         134 :         ProcessConfigFile(PGC_SIGHUP);
     669             : 
     670             :         /*
      671             :          * Checkpointer is the last process to shut down, so we ask it to hold
      672             :          * the keys for a range of other tasks, most of which have nothing to
      673             :          * do with checkpointing at all.
     674             :          *
     675             :          * For various reasons, some config values can change dynamically so
     676             :          * the primary copy of them is held in shared memory to make sure all
     677             :          * backends see the same value.  We make Checkpointer responsible for
     678             :          * updating the shared memory copy if the parameter setting changes
     679             :          * because of SIGHUP.
     680             :          */
     681         134 :         UpdateSharedMemoryConfig();
     682             :     }
     683             : 
     684             :     /* Perform logging of memory contexts of this process */
     685       14764 :     if (LogMemoryContextPending)
     686           2 :         ProcessLogMemoryContextInterrupt();
     687       14764 : }
     688             : 
     689             : /*
     690             :  * CheckArchiveTimeout -- check for archive_timeout and switch xlog files
     691             :  *
     692             :  * This will switch to a new WAL file and force an archive file write if
     693             :  * meaningful activity is recorded in the current WAL file. This includes most
     694             :  * writes, including just a single checkpoint record, but excludes WAL records
     695             :  * that were inserted with the XLOG_MARK_UNIMPORTANT flag being set (like
      696             :  * that were inserted with the XLOG_MARK_UNIMPORTANT flag set (like
      697             :  * snapshots of running transactions).  Such records, depending on
      698             :  * configuration, occur at regular intervals and don't contain important
      699             :  * information.  This avoids generating archives that contain only a few
      700             :  * unimportant records.
     701             : static void
     702       26128 : CheckArchiveTimeout(void)
     703             : {
     704             :     pg_time_t   now;
     705             :     pg_time_t   last_time;
     706             :     XLogRecPtr  last_switch_lsn;
     707             : 
     708       26128 :     if (XLogArchiveTimeout <= 0 || RecoveryInProgress())
     709       26128 :         return;
     710             : 
     711           0 :     now = (pg_time_t) time(NULL);
     712             : 
     713             :     /* First we do a quick check using possibly-stale local state. */
     714           0 :     if ((int) (now - last_xlog_switch_time) < XLogArchiveTimeout)
     715           0 :         return;
     716             : 
     717             :     /*
     718             :      * Update local state ... note that last_xlog_switch_time is the last time
     719             :      * a switch was performed *or requested*.
     720             :      */
     721           0 :     last_time = GetLastSegSwitchData(&last_switch_lsn);
     722             : 
     723           0 :     last_xlog_switch_time = Max(last_xlog_switch_time, last_time);
     724             : 
     725             :     /* Now we can do the real checks */
     726           0 :     if ((int) (now - last_xlog_switch_time) >= XLogArchiveTimeout)
     727             :     {
     728             :         /*
     729             :          * Switch segment only when "important" WAL has been logged since the
     730             :          * last segment switch (last_switch_lsn points to end of segment
     731             :          * switch occurred in).
     732             :          */
     733           0 :         if (GetLastImportantRecPtr() > last_switch_lsn)
     734             :         {
     735             :             XLogRecPtr  switchpoint;
     736             : 
     737             :             /* mark switch as unimportant, avoids triggering checkpoints */
     738           0 :             switchpoint = RequestXLogSwitch(true);
     739             : 
     740             :             /*
     741             :              * If the returned pointer points exactly to a segment boundary,
     742             :              * assume nothing happened.
     743             :              */
     744           0 :             if (XLogSegmentOffset(switchpoint, wal_segment_size) != 0)
     745           0 :                 elog(DEBUG1, "write-ahead log switch forced (\"archive_timeout\"=%d)",
     746             :                      XLogArchiveTimeout);
     747             :         }
     748             : 
     749             :         /*
     750             :          * Update state in any case, so we don't retry constantly when the
     751             :          * system is idle.
     752             :          */
     753           0 :         last_xlog_switch_time = now;
     754             :     }
     755             : }
     756             : 
     757             : /*
     758             :  * Returns true if a fast checkpoint request is pending.  (Note that this does
     759             :  * not check the *current* checkpoint's FAST flag, but whether there is one
     760             :  * pending behind it.)
     761             :  */
     762             : static bool
     763       93294 : FastCheckpointRequested(void)
     764             : {
     765       93294 :     volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
     766             : 
     767             :     /*
     768             :      * We don't need to acquire the ckpt_lck in this case because we're only
     769             :      * looking at a single flag bit.
     770             :      */
     771       93294 :     if (cps->ckpt_flags & CHECKPOINT_FAST)
     772       14876 :         return true;
     773       78418 :     return false;
     774             : }
     775             : 
     776             : /*
     777             :  * CheckpointWriteDelay -- control rate of checkpoint
     778             :  *
     779             :  * This function is called after each page write performed by BufferSync().
     780             :  * It is responsible for throttling BufferSync()'s write rate to hit
     781             :  * checkpoint_completion_target.
     782             :  *
     783             :  * The checkpoint request flags should be passed in; currently the only one
     784             :  * examined is CHECKPOINT_FAST, which disables delays between writes.
     785             :  *
     786             :  * 'progress' is an estimate of how much of the work has been done, as a
     787             :  * fraction between 0.0 meaning none, and 1.0 meaning all done.
     788             :  */
     789             : void
     790      589912 : CheckpointWriteDelay(int flags, double progress)
     791             : {
     792             :     static int  absorb_counter = WRITES_PER_ABSORB;
     793             : 
     794             :     /* Do nothing if checkpoint is being executed by non-checkpointer process */
     795      589912 :     if (!AmCheckpointerProcess())
     796      103450 :         return;
     797             : 
     798             :     /*
     799             :      * Perform the usual duties and take a nap, unless we're behind schedule,
     800             :      * in which case we just try to catch up as quickly as possible.
     801             :      */
     802      486462 :     if (!(flags & CHECKPOINT_FAST) &&
     803       93670 :         !ShutdownXLOGPending &&
     804       93294 :         !ShutdownRequestPending &&
     805      171712 :         !FastCheckpointRequested() &&
     806       78418 :         IsCheckpointOnSchedule(progress))
     807             :     {
     808       17364 :         if (ConfigReloadPending)
     809             :         {
     810           0 :             ConfigReloadPending = false;
     811           0 :             ProcessConfigFile(PGC_SIGHUP);
     812             :             /* update shmem copies of config variables */
     813           0 :             UpdateSharedMemoryConfig();
     814             :         }
     815             : 
     816       17364 :         AbsorbSyncRequests();
     817       17364 :         absorb_counter = WRITES_PER_ABSORB;
     818             : 
     819       17364 :         CheckArchiveTimeout();
     820             : 
     821             :         /* Report interim statistics to the cumulative stats system */
     822       17364 :         pgstat_report_checkpointer();
     823             : 
     824             :         /*
     825             :          * This sleep used to be connected to bgwriter_delay, typically 200ms.
      826             :          * That resulted in more frequent wakeups even when there was not
      827             :          * much work to do.  Checkpointer and bgwriter are no longer related,
      828             :          * so take the Big Sleep.
     829             :          */
     830       17364 :         WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH | WL_TIMEOUT,
     831             :                   100,
     832             :                   WAIT_EVENT_CHECKPOINT_WRITE_DELAY);
     833       17364 :         ResetLatch(MyLatch);
     834             :     }
     835      469098 :     else if (--absorb_counter <= 0)
     836             :     {
     837             :         /*
     838             :          * Absorb pending fsync requests after each WRITES_PER_ABSORB write
     839             :          * operations even when we don't sleep, to prevent overflow of the
     840             :          * fsync request queue.
     841             :          */
     842         204 :         AbsorbSyncRequests();
     843         204 :         absorb_counter = WRITES_PER_ABSORB;
     844             :     }
     845             : 
     846             :     /* Check for barrier events. */
     847      486462 :     if (ProcSignalBarrierPending)
     848          14 :         ProcessProcSignalBarrier();
     849             : }
     850             : 
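
As a usage note (an editor's sketch, not code from this file): the caller that
drives CheckpointWriteDelay() during a checkpoint is BufferSync() in bufmgr.c,
which reports its progress as a simple fraction of the buffers scanned so far.
write_one_buffer() below is a hypothetical stand-in for the real buffer-writing
step.

static void write_one_buffer(int buf_id);   /* hypothetical helper */

static void
WriteDirtyBuffersSketch(int flags, int num_to_scan)
{
    int         i;

    for (i = 0; i < num_to_scan; i++)
    {
        write_one_buffer(i);

        /* progress runs from 0.0 towards 1.0 as the scan advances */
        CheckpointWriteDelay(flags, (double) i / num_to_scan);
    }
}
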
     851             : /*
     852             :  * IsCheckpointOnSchedule -- are we on schedule to finish this checkpoint
     853             :  *       (or restartpoint) in time?
     854             :  *
     855             :  * Compares the current progress against the time/segments elapsed since last
     856             :  * checkpoint, and returns true if the progress we've made this far is greater
     857             :  * than the elapsed time/segments.
     858             :  */
     859             : static bool
     860       78418 : IsCheckpointOnSchedule(double progress)
     861             : {
     862             :     XLogRecPtr  recptr;
     863             :     struct timeval now;
     864             :     double      elapsed_xlogs,
     865             :                 elapsed_time;
     866             : 
     867             :     Assert(ckpt_active);
     868             : 
     869             :     /* Scale progress according to checkpoint_completion_target. */
     870       78418 :     progress *= CheckPointCompletionTarget;
     871             : 
     872             :     /*
     873             :      * Check against the cached value first. Only do the more expensive
     874             :      * calculations once we reach the target previously calculated. Since
      875             :      * neither time nor the WAL insert pointer moves backwards, a freshly
     876             :      * calculated value can only be greater than or equal to the cached value.
     877             :      */
     878       78418 :     if (progress < ckpt_cached_elapsed)
     879       53468 :         return false;
     880             : 
     881             :     /*
     882             :      * Check progress against WAL segments written and CheckPointSegments.
     883             :      *
     884             :      * We compare the current WAL insert location against the location
     885             :      * computed before calling CreateCheckPoint. The code in XLogInsert that
     886             :      * actually triggers a checkpoint when CheckPointSegments is exceeded
     887             :      * compares against RedoRecPtr, so this is not completely accurate.
     888             :      * However, it's good enough for our purposes, we're only calculating an
     889             :      * estimate anyway.
     890             :      *
     891             :      * During recovery, we compare last replayed WAL record's location with
     892             :      * the location computed before calling CreateRestartPoint. That maintains
     893             :      * the same pacing as we have during checkpoints in normal operation, but
     894             :      * we might exceed max_wal_size by a fair amount. That's because there can
     895             :      * be a large gap between a checkpoint's redo-pointer and the checkpoint
     896             :      * record itself, and we only start the restartpoint after we've seen the
     897             :      * checkpoint record. (The gap is typically up to CheckPointSegments *
     898             :      * checkpoint_completion_target where checkpoint_completion_target is the
     899             :      * value that was in effect when the WAL was generated).
     900             :      */
     901       24950 :     if (RecoveryInProgress())
     902       12114 :         recptr = GetXLogReplayRecPtr(NULL);
     903             :     else
     904       12836 :         recptr = GetInsertRecPtr();
     905       24950 :     elapsed_xlogs = (((double) (recptr - ckpt_start_recptr)) /
     906       24950 :                      wal_segment_size) / CheckPointSegments;
     907             : 
     908       24950 :     if (progress < elapsed_xlogs)
     909             :     {
     910        7586 :         ckpt_cached_elapsed = elapsed_xlogs;
     911        7586 :         return false;
     912             :     }
     913             : 
     914             :     /*
     915             :      * Check progress against time elapsed and checkpoint_timeout.
     916             :      */
     917       17364 :     gettimeofday(&now, NULL);
     918       17364 :     elapsed_time = ((double) ((pg_time_t) now.tv_sec - ckpt_start_time) +
     919       17364 :                     now.tv_usec / 1000000.0) / CheckPointTimeout;
     920             : 
     921       17364 :     if (progress < elapsed_time)
     922             :     {
     923           0 :         ckpt_cached_elapsed = elapsed_time;
     924           0 :         return false;
     925             :     }
     926             : 
     927             :     /* It looks like we're on schedule. */
     928       17364 :     return true;
     929             : }
     930             : 
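
To make the pacing rule concrete, here is an editor's worked example with purely
illustrative numbers (not taken from this file): checkpoint_completion_target =
0.9, checkpoint_timeout = 300 s, and CheckPointSegments = 32.

#include <stdio.h>

int
main(void)
{
    double      progress = 0.50;    /* half of the dirty buffers written */
    double      target = 0.9;       /* checkpoint_completion_target */
    double      scaled = progress * target;     /* 0.45 */

    double      elapsed_time = 120.0 / 300.0;   /* 120 s of 300 s: 0.40 */
    double      elapsed_xlogs = 12.0 / 32.0;    /* 12 of 32 segments: 0.375 */

    /* On schedule only if scaled progress is ahead of both measures */
    if (scaled >= elapsed_time && scaled >= elapsed_xlogs)
        printf("on schedule: nap in CheckpointWriteDelay()\n");
    else
        printf("behind schedule: keep writing without napping\n");

    return 0;
}
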
     931             : 
     932             : /* --------------------------------
     933             :  *      signal handler routines
     934             :  * --------------------------------
     935             :  */
     936             : 
     937             : /* SIGINT: set flag to trigger writing of shutdown checkpoint */
     938             : static void
     939        1132 : ReqShutdownXLOG(SIGNAL_ARGS)
     940             : {
     941        1132 :     ShutdownXLOGPending = true;
     942        1132 :     SetLatch(MyLatch);
     943        1132 : }
     944             : 
     945             : 
     946             : /* --------------------------------
     947             :  *      communication with backends
     948             :  * --------------------------------
     949             :  */
     950             : 
     951             : /*
     952             :  * CheckpointerShmemSize
     953             :  *      Compute space needed for checkpointer-related shared memory
     954             :  */
     955             : Size
     956        6534 : CheckpointerShmemSize(void)
     957             : {
     958             :     Size        size;
     959             : 
     960             :     /*
     961             :      * The size of the requests[] array is arbitrarily set equal to NBuffers.
     962             :      * But there is a cap of MAX_CHECKPOINT_REQUESTS to prevent accumulating
     963             :      * too many checkpoint requests in the ring buffer.
     964             :      */
     965        6534 :     size = offsetof(CheckpointerShmemStruct, requests);
     966        6534 :     size = add_size(size, mul_size(Min(NBuffers,
     967             :                                        MAX_CHECKPOINT_REQUESTS),
     968             :                                    sizeof(CheckpointerRequest)));
     969             : 
     970        6534 :     return size;
     971             : }
     972             : 
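
A quick back-of-the-envelope on the sizing rule above (an editor's sketch; the
request struct here is a stand-in, so the byte counts are only illustrative):
the queue tracks NBuffers until the MAX_CHECKPOINT_REQUESTS cap kicks in.

#include <stddef.h>
#include <stdio.h>

#define SKETCH_MAX_REQUESTS 10000000    /* mirrors MAX_CHECKPOINT_REQUESTS */

typedef struct
{
    int         type;
    char        tag[20];        /* stand-in for FileTag; size is illustrative */
} SketchRequest;

static size_t
queue_bytes(long nbuffers)
{
    long        slots = (nbuffers < SKETCH_MAX_REQUESTS) ?
        nbuffers : SKETCH_MAX_REQUESTS;

    return (size_t) slots * sizeof(SketchRequest);
}

int
main(void)
{
    /* 128MB of shared_buffers with 8kB pages gives NBuffers = 16384 */
    printf("16384 buffers    -> %zu bytes of queue\n", queue_bytes(16384));
    /* an absurdly large setting is capped at SKETCH_MAX_REQUESTS slots */
    printf("20000000 buffers -> %zu bytes of queue\n", queue_bytes(20000000L));
    return 0;
}
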
     973             : /*
     974             :  * CheckpointerShmemInit
     975             :  *      Allocate and initialize checkpointer-related shared memory
     976             :  */
     977             : void
     978        2280 : CheckpointerShmemInit(void)
     979             : {
     980        2280 :     Size        size = CheckpointerShmemSize();
     981             :     bool        found;
     982             : 
     983        2280 :     CheckpointerShmem = (CheckpointerShmemStruct *)
     984        2280 :         ShmemInitStruct("Checkpointer Data",
     985             :                         size,
     986             :                         &found);
     987             : 
     988        2280 :     if (!found)
     989             :     {
     990             :         /*
     991             :          * First time through, so initialize.  Note that we zero the whole
     992             :          * requests array; this is so that CompactCheckpointerRequestQueue can
     993             :          * assume that any pad bytes in the request structs are zeroes.
     994             :          */
     995        2568 :         MemSet(CheckpointerShmem, 0, size);
     996        2280 :         SpinLockInit(&CheckpointerShmem->ckpt_lck);
     997        2280 :         CheckpointerShmem->max_requests = Min(NBuffers, MAX_CHECKPOINT_REQUESTS);
     998        2280 :         CheckpointerShmem->head = CheckpointerShmem->tail = 0;
     999        2280 :         ConditionVariableInit(&CheckpointerShmem->start_cv);
    1000        2280 :         ConditionVariableInit(&CheckpointerShmem->done_cv);
    1001             :     }
    1002        2280 : }
    1003             : 
    1004             : /*
    1005             :  * ExecCheckpoint
    1006             :  *      Primary entry point for manual CHECKPOINT commands
    1007             :  *
    1008             :  * This is mainly a wrapper for RequestCheckpoint().
    1009             :  */
    1010             : void
    1011         896 : ExecCheckpoint(ParseState *pstate, CheckPointStmt *stmt)
    1012             : {
    1013         896 :     bool        fast = true;
    1014         896 :     bool        unlogged = false;
    1015             : 
    1016        1798 :     foreach_ptr(DefElem, opt, stmt->options)
    1017             :     {
    1018          30 :         if (strcmp(opt->defname, "mode") == 0)
    1019             :         {
    1020          12 :             char       *mode = defGetString(opt);
    1021             : 
    1022          12 :             if (strcmp(mode, "spread") == 0)
    1023           0 :                 fast = false;
    1024          12 :             else if (strcmp(mode, "fast") != 0)
    1025           6 :                 ereport(ERROR,
    1026             :                         (errcode(ERRCODE_SYNTAX_ERROR),
    1027             :                          errmsg("unrecognized value for %s option \"%s\": \"%s\"",
    1028             :                                 "CHECKPOINT", "mode", mode),
    1029             :                          parser_errposition(pstate, opt->location)));
    1030             :         }
    1031          18 :         else if (strcmp(opt->defname, "flush_unlogged") == 0)
    1032          12 :             unlogged = defGetBoolean(opt);
    1033             :         else
    1034           6 :             ereport(ERROR,
    1035             :                     (errcode(ERRCODE_SYNTAX_ERROR),
    1036             :                      errmsg("unrecognized %s option \"%s\"",
    1037             :                             "CHECKPOINT", opt->defname),
    1038             :                      parser_errposition(pstate, opt->location)));
    1039             :     }
    1040             : 
    1041         884 :     if (!has_privs_of_role(GetUserId(), ROLE_PG_CHECKPOINT))
    1042           0 :         ereport(ERROR,
    1043             :                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
    1044             :         /* translator: %s is name of an SQL command (e.g., CHECKPOINT) */
    1045             :                  errmsg("permission denied to execute %s command",
    1046             :                         "CHECKPOINT"),
    1047             :                  errdetail("Only roles with privileges of the \"%s\" role may execute this command.",
    1048             :                            "pg_checkpoint")));
    1049             : 
    1050        1768 :     RequestCheckpoint(CHECKPOINT_WAIT |
    1051         884 :                       (fast ? CHECKPOINT_FAST : 0) |
    1052         884 :                       (unlogged ? CHECKPOINT_FLUSH_UNLOGGED : 0) |
    1053         884 :                       (RecoveryInProgress() ? 0 : CHECKPOINT_FORCE));
    1054         884 : }
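The loop above reduces the statement's options to a flag word handed to RequestCheckpoint(): MODE selects fast versus spread, FLUSH_UNLOGGED adds a flag, and FORCE is added only when not in recovery. A standalone sketch of that composition; the flag names and bit values here are invented stand-ins for the real CHECKPOINT_* constants:

    /* Standalone sketch of composing the request flags; bit values invented. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define FLAG_WAIT            0x01
    #define FLAG_FAST            0x02
    #define FLAG_FLUSH_UNLOGGED  0x04
    #define FLAG_FORCE           0x08

    static int
    checkpoint_flags(const char *mode, bool flush_unlogged, bool in_recovery)
    {
        bool fast = (strcmp(mode, "spread") != 0);  /* anything else means fast */

        return FLAG_WAIT |
               (fast ? FLAG_FAST : 0) |
               (flush_unlogged ? FLAG_FLUSH_UNLOGGED : 0) |
               (in_recovery ? 0 : FLAG_FORCE);
    }

    int
    main(void)
    {
        /* Plain CHECKPOINT on a primary: wait + fast + force. */
        printf("0x%02x\n", checkpoint_flags("fast", false, false));    /* 0x0b */

        /* Spread mode with flush_unlogged while in recovery: wait + flush. */
        printf("0x%02x\n", checkpoint_flags("spread", true, true));    /* 0x05 */
        return 0;
    }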
    1055             : 
    1056             : /*
    1057             :  * RequestCheckpoint
    1058             :  *      Called in backend processes to request a checkpoint
    1059             :  *
    1060             :  * flags is a bitwise OR of the following:
    1061             :  *  CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
    1062             :  *  CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
    1063             :  *  CHECKPOINT_FAST: finish the checkpoint ASAP,
    1064             :  *      ignoring checkpoint_completion_target parameter.
    1065             :  *  CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
    1066             :  *      since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
    1067             :  *      CHECKPOINT_END_OF_RECOVERY, and the CHECKPOINT command).
    1068             :  *  CHECKPOINT_WAIT: wait for completion before returning (otherwise,
    1069             :  *      just signal checkpointer to do it, and return).
    1070             :  *  CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling.
    1071             :  *      (This affects logging, and in particular enables CheckPointWarning.)
    1072             :  */
    1073             : void
    1074        5134 : RequestCheckpoint(int flags)
    1075             : {
    1076             :     int         ntries;
    1077             :     int         old_failed,
    1078             :                 old_started;
    1079             : 
    1080             :     /*
    1081             :      * If in a standalone backend, just do it ourselves.
    1082             :      */
    1083        5134 :     if (!IsPostmasterEnvironment)
    1084             :     {
    1085             :         /*
    1086             :          * There's no point in doing slow checkpoints in a standalone backend,
     1087             :          * because there are no other backends the checkpoint could disrupt.
    1088             :          */
    1089         410 :         CreateCheckPoint(flags | CHECKPOINT_FAST);
    1090             : 
    1091             :         /* Free all smgr objects, as CheckpointerMain() normally would. */
    1092         410 :         smgrdestroyall();
    1093             : 
    1094         410 :         return;
    1095             :     }
    1096             : 
    1097             :     /*
    1098             :      * Atomically set the request flags, and take a snapshot of the counters.
    1099             :      * When we see ckpt_started > old_started, we know the flags we set here
    1100             :      * have been seen by checkpointer.
    1101             :      *
    1102             :      * Note that we OR the flags with any existing flags, to avoid overriding
    1103             :      * a "stronger" request by another backend.  The flag senses must be
    1104             :      * chosen to make this work!
    1105             :      */
    1106        4724 :     SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
    1107             : 
    1108        4724 :     old_failed = CheckpointerShmem->ckpt_failed;
    1109        4724 :     old_started = CheckpointerShmem->ckpt_started;
    1110        4724 :     CheckpointerShmem->ckpt_flags |= (flags | CHECKPOINT_REQUESTED);
    1111             : 
    1112        4724 :     SpinLockRelease(&CheckpointerShmem->ckpt_lck);
    1113             : 
    1114             :     /*
    1115             :      * Set checkpointer's latch to request checkpoint.  It's possible that the
    1116             :      * checkpointer hasn't started yet, so we will retry a few times if
    1117             :      * needed.  (Actually, more than a few times, since on slow or overloaded
    1118             :      * buildfarm machines, it's been observed that the checkpointer can take
    1119             :      * several seconds to start.)  However, if not told to wait for the
    1120             :      * checkpoint to occur, we consider failure to set the latch to be
    1121             :      * nonfatal and merely LOG it.  The checkpointer should see the request
    1122             :      * when it does start, with or without the SetLatch().
    1123             :      */
    1124             : #define MAX_SIGNAL_TRIES 600    /* max wait 60.0 sec */
    1125        4724 :     for (ntries = 0;; ntries++)
    1126          56 :     {
    1127        4780 :         volatile PROC_HDR *procglobal = ProcGlobal;
    1128        4780 :         ProcNumber  checkpointerProc = procglobal->checkpointerProc;
    1129             : 
    1130        4780 :         if (checkpointerProc == INVALID_PROC_NUMBER)
    1131             :         {
    1132          66 :             if (ntries >= MAX_SIGNAL_TRIES || !(flags & CHECKPOINT_WAIT))
    1133             :             {
    1134          10 :                 elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
    1135             :                      "could not notify checkpoint: checkpointer is not running");
    1136          10 :                 break;
    1137             :             }
    1138             :         }
    1139             :         else
    1140             :         {
    1141        4714 :             SetLatch(&GetPGProcByNumber(checkpointerProc)->procLatch);
    1142             :             /* notified successfully */
    1143        4714 :             break;
    1144             :         }
    1145             : 
    1146          56 :         CHECK_FOR_INTERRUPTS();
    1147          56 :         pg_usleep(100000L);     /* wait 0.1 sec, then retry */
    1148             :     }
    1149             : 
    1150             :     /*
    1151             :      * If requested, wait for completion.  We detect completion according to
    1152             :      * the algorithm given above.
    1153             :      */
    1154        4724 :     if (flags & CHECKPOINT_WAIT)
    1155             :     {
    1156             :         int         new_started,
    1157             :                     new_failed;
    1158             : 
    1159             :         /* Wait for a new checkpoint to start. */
    1160        1650 :         ConditionVariablePrepareToSleep(&CheckpointerShmem->start_cv);
    1161             :         for (;;)
    1162             :         {
    1163        2922 :             SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
    1164        2922 :             new_started = CheckpointerShmem->ckpt_started;
    1165        2922 :             SpinLockRelease(&CheckpointerShmem->ckpt_lck);
    1166             : 
    1167        2922 :             if (new_started != old_started)
    1168        1650 :                 break;
    1169             : 
    1170        1272 :             ConditionVariableSleep(&CheckpointerShmem->start_cv,
    1171             :                                    WAIT_EVENT_CHECKPOINT_START);
    1172             :         }
    1173        1650 :         ConditionVariableCancelSleep();
    1174             : 
    1175             :         /*
    1176             :          * We are waiting for ckpt_done >= new_started, in a modulo sense.
    1177             :          */
    1178        1650 :         ConditionVariablePrepareToSleep(&CheckpointerShmem->done_cv);
    1179             :         for (;;)
    1180        1244 :         {
    1181             :             int         new_done;
    1182             : 
    1183        2894 :             SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
    1184        2894 :             new_done = CheckpointerShmem->ckpt_done;
    1185        2894 :             new_failed = CheckpointerShmem->ckpt_failed;
    1186        2894 :             SpinLockRelease(&CheckpointerShmem->ckpt_lck);
    1187             : 
    1188        2894 :             if (new_done - new_started >= 0)
    1189        1650 :                 break;
    1190             : 
    1191        1244 :             ConditionVariableSleep(&CheckpointerShmem->done_cv,
    1192             :                                    WAIT_EVENT_CHECKPOINT_DONE);
    1193             :         }
    1194        1650 :         ConditionVariableCancelSleep();
    1195             : 
    1196        1650 :         if (new_failed != old_failed)
    1197           0 :             ereport(ERROR,
    1198             :                     (errmsg("checkpoint request failed"),
    1199             :                      errhint("Consult recent messages in the server log for details.")));
    1200             :     }
    1201             : }
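As the comment above notes, concurrent requests are merged by OR-ing their flag words into ckpt_flags, so a "stronger" request (say, one asking for a fast checkpoint) is never downgraded by a weaker one that arrives later. A standalone sketch of that merge rule, with invented flag bits:

    /* Standalone sketch of merging concurrent checkpoint requests by OR-ing
     * flag words; flag bits are invented for illustration. */
    #include <stdio.h>

    #define FLAG_REQUESTED 0x01
    #define FLAG_FAST      0x02
    #define FLAG_WAIT      0x04

    int
    main(void)
    {
        int shared_flags = 0;

        /* Backend A asks for a fast checkpoint and waits for it. */
        shared_flags |= FLAG_REQUESTED | FLAG_FAST | FLAG_WAIT;

        /* Backend B asks for an ordinary (spread) checkpoint. */
        shared_flags |= FLAG_REQUESTED;

        /* The merged request is still fast: the stronger option survives. */
        printf("fast requested: %s\n",
               (shared_flags & FLAG_FAST) ? "yes" : "no");
        return 0;
    }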
    1202             : 
    1203             : /*
    1204             :  * ForwardSyncRequest
    1205             :  *      Forward a file-fsync request from a backend to the checkpointer
    1206             :  *
    1207             :  * Whenever a backend is compelled to write directly to a relation
    1208             :  * (which should be seldom, if the background writer is getting its job done),
    1209             :  * the backend calls this routine to pass over knowledge that the relation
    1210             :  * is dirty and must be fsync'd before next checkpoint.  We also use this
    1211             :  * opportunity to count such writes for statistical purposes.
    1212             :  *
    1213             :  * To avoid holding the lock for longer than necessary, we normally write
    1214             :  * to the requests[] queue without checking for duplicates.  The checkpointer
    1215             :  * will have to eliminate dups internally anyway.  However, if we discover
    1216             :  * that the queue is full, we make a pass over the entire queue to compact
    1217             :  * it.  This is somewhat expensive, but the alternative is for the backend
    1218             :  * to perform its own fsync, which is far more expensive in practice.  It
    1219             :  * is theoretically possible a backend fsync might still be necessary, if
    1220             :  * the queue is full and contains no duplicate entries.  In that case, we
    1221             :  * let the backend know by returning false.
    1222             :  */
    1223             : bool
    1224     2524840 : ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
    1225             : {
    1226             :     CheckpointerRequest *request;
    1227             :     bool        too_full;
    1228             :     int         insert_pos;
    1229             : 
    1230     2524840 :     if (!IsUnderPostmaster)
    1231           0 :         return false;           /* probably shouldn't even get here */
    1232             : 
    1233     2524840 :     if (AmCheckpointerProcess())
    1234           0 :         elog(ERROR, "ForwardSyncRequest must not be called in checkpointer");
    1235             : 
    1236     2524840 :     LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
    1237             : 
    1238             :     /*
    1239             :      * If the checkpointer isn't running or the request queue is full, the
    1240             :      * backend will have to perform its own fsync request.  But before forcing
    1241             :      * that to happen, we can try to compact the request queue.
    1242             :      */
    1243     2524840 :     if (CheckpointerShmem->checkpointer_pid == 0 ||
    1244     2521586 :         (CheckpointerShmem->num_requests >= CheckpointerShmem->max_requests &&
    1245        2470 :          !CompactCheckpointerRequestQueue()))
    1246             :     {
    1247        5068 :         LWLockRelease(CheckpointerCommLock);
    1248        5068 :         return false;
    1249             :     }
    1250             : 
    1251             :     /* OK, insert request */
    1252     2519772 :     insert_pos = CheckpointerShmem->tail;
    1253     2519772 :     request = &CheckpointerShmem->requests[insert_pos];
    1254     2519772 :     request->ftag = *ftag;
    1255     2519772 :     request->type = type;
    1256             : 
    1257     2519772 :     CheckpointerShmem->tail = (CheckpointerShmem->tail + 1) % CheckpointerShmem->max_requests;
    1258     2519772 :     CheckpointerShmem->num_requests++;
    1259             : 
    1260             :     /* If queue is more than half full, nudge the checkpointer to empty it */
    1261     2519772 :     too_full = (CheckpointerShmem->num_requests >=
    1262     2519772 :                 CheckpointerShmem->max_requests / 2);
    1263             : 
    1264     2519772 :     LWLockRelease(CheckpointerCommLock);
    1265             : 
    1266             :     /* ... but not till after we release the lock */
    1267     2519772 :     if (too_full)
    1268             :     {
    1269       63886 :         volatile PROC_HDR *procglobal = ProcGlobal;
    1270       63886 :         ProcNumber  checkpointerProc = procglobal->checkpointerProc;
    1271             : 
    1272       63886 :         if (checkpointerProc != INVALID_PROC_NUMBER)
    1273       63886 :             SetLatch(&GetPGProcByNumber(checkpointerProc)->procLatch);
    1274             :     }
    1275             : 
    1276     2519772 :     return true;
    1277             : }
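The contract described in the header comment is caller-visible: a false return means the request could not be queued (checkpointer not running, or the queue full and not compactable) and the backend must fsync the file itself. A minimal sketch of that caller-side fallback, where forward_request() and fsync_myself() are hypothetical stand-ins for the real sync-layer callers:

    /* Caller-side sketch of the "queue it, else do it yourself" contract.
     * forward_request() and fsync_myself() are hypothetical stand-ins. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool
    forward_request(int file_id)
    {
        /* Pretend the checkpointer's queue is full and cannot be compacted. */
        (void) file_id;
        return false;
    }

    static void
    fsync_myself(int file_id)
    {
        printf("backend fsyncs file %d itself\n", file_id);
    }

    int
    main(void)
    {
        int file_id = 7;

        if (!forward_request(file_id))
            fsync_myself(file_id);  /* fall back, as the comment describes */
        return 0;
    }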
    1278             : 
    1279             : /*
    1280             :  * CompactCheckpointerRequestQueue
    1281             :  *      Remove duplicates from the request queue to avoid backend fsyncs.
    1282             :  *      Returns "true" if any entries were removed.
    1283             :  *
    1284             :  * Although a full fsync request queue is not common, it can lead to severe
    1285             :  * performance problems when it does happen.  So far, this situation has
    1286             :  * only been observed to occur when the system is under heavy write load,
    1287             :  * and especially during the "sync" phase of a checkpoint.  Without this
    1288             :  * logic, each backend begins doing an fsync for every block written, which
    1289             :  * gets very expensive and can slow down the whole system.
    1290             :  *
     1291             :  * Trying to do this every time the queue is full could be a net loss if
     1292             :  * there aren't any removable entries.  But that should be vanishingly rare in
    1293             :  * practice: there's one queue entry per shared buffer.
    1294             :  */
    1295             : static bool
    1296        2470 : CompactCheckpointerRequestQueue(void)
    1297             : {
    1298             :     struct CheckpointerSlotMapping
    1299             :     {
    1300             :         CheckpointerRequest request;
    1301             :         int         ring_idx;
    1302             :     };
    1303             : 
    1304             :     int         n;
    1305        2470 :     int         num_skipped = 0;
    1306             :     int         head;
    1307             :     int         max_requests;
    1308             :     int         num_requests;
    1309             :     int         read_idx,
    1310             :                 write_idx;
    1311             :     HASHCTL     ctl;
    1312             :     HTAB       *htab;
    1313             :     bool       *skip_slot;
    1314             : 
    1315             :     /* must hold CheckpointerCommLock in exclusive mode */
    1316             :     Assert(LWLockHeldByMe(CheckpointerCommLock));
    1317             : 
    1318             :     /* Avoid memory allocations in a critical section. */
    1319        2470 :     if (CritSectionCount > 0)
    1320           0 :         return false;
    1321             : 
    1322        2470 :     max_requests = CheckpointerShmem->max_requests;
    1323        2470 :     num_requests = CheckpointerShmem->num_requests;
    1324             : 
    1325             :     /* Initialize skip_slot array */
    1326        2470 :     skip_slot = palloc0_array(bool, max_requests);
    1327             : 
    1328        2470 :     head = CheckpointerShmem->head;
    1329             : 
    1330             :     /* Initialize temporary hash table */
    1331        2470 :     ctl.keysize = sizeof(CheckpointerRequest);
    1332        2470 :     ctl.entrysize = sizeof(struct CheckpointerSlotMapping);
    1333        2470 :     ctl.hcxt = CurrentMemoryContext;
    1334             : 
    1335        2470 :     htab = hash_create("CompactCheckpointerRequestQueue",
    1336        2470 :                        CheckpointerShmem->num_requests,
    1337             :                        &ctl,
    1338             :                        HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
    1339             : 
    1340             :     /*
    1341             :      * The basic idea here is that a request can be skipped if it's followed
    1342             :      * by a later, identical request.  It might seem more sensible to work
    1343             :      * backwards from the end of the queue and check whether a request is
    1344             :      * *preceded* by an earlier, identical request, in the hopes of doing less
    1345             :      * copying.  But that might change the semantics, if there's an
    1346             :      * intervening SYNC_FORGET_REQUEST or SYNC_FILTER_REQUEST, so we do it
    1347             :      * this way.  It would be possible to be even smarter if we made the code
    1348             :      * below understand the specific semantics of such requests (it could blow
    1349             :      * away preceding entries that would end up being canceled anyhow), but
    1350             :      * it's not clear that the extra complexity would buy us anything.
    1351             :      */
    1352        2470 :     read_idx = head;
    1353      312134 :     for (n = 0; n < num_requests; n++)
    1354             :     {
    1355             :         CheckpointerRequest *request;
    1356             :         struct CheckpointerSlotMapping *slotmap;
    1357             :         bool        found;
    1358             : 
    1359             :         /*
    1360             :          * We use the request struct directly as a hashtable key.  This
    1361             :          * assumes that any padding bytes in the structs are consistently the
    1362             :          * same, which should be okay because we zeroed them in
    1363             :          * CheckpointerShmemInit.  Note also that RelFileLocator had better
    1364             :          * contain no pad bytes.
    1365             :          */
    1366      309664 :         request = &CheckpointerShmem->requests[read_idx];
    1367      309664 :         slotmap = hash_search(htab, request, HASH_ENTER, &found);
    1368      309664 :         if (found)
    1369             :         {
    1370             :             /* Duplicate, so mark the previous occurrence as skippable */
    1371       25782 :             skip_slot[slotmap->ring_idx] = true;
    1372       25782 :             num_skipped++;
    1373             :         }
    1374             :         /* Remember slot containing latest occurrence of this request value */
    1375      309664 :         slotmap->ring_idx = read_idx;
    1376             : 
    1377             :         /* Move to the next request in the ring buffer */
    1378      309664 :         read_idx = (read_idx + 1) % max_requests;
    1379             :     }
    1380             : 
    1381             :     /* Done with the hash table. */
    1382        2470 :     hash_destroy(htab);
    1383             : 
    1384             :     /* If no duplicates, we're out of luck. */
    1385        2470 :     if (!num_skipped)
    1386             :     {
    1387        1814 :         pfree(skip_slot);
    1388        1814 :         return false;
    1389             :     }
    1390             : 
    1391             :     /* We found some duplicates; remove them. */
    1392         656 :     read_idx = write_idx = head;
    1393       78128 :     for (n = 0; n < num_requests; n++)
    1394             :     {
    1395             :         /* If this slot is NOT skipped, keep it */
    1396       77472 :         if (!skip_slot[read_idx])
    1397             :         {
    1398             :             /* If the read and write positions are different, copy the request */
    1399       51690 :             if (write_idx != read_idx)
    1400       29960 :                 CheckpointerShmem->requests[write_idx] =
    1401       29960 :                     CheckpointerShmem->requests[read_idx];
    1402             : 
    1403             :             /* Advance the write position */
    1404       51690 :             write_idx = (write_idx + 1) % max_requests;
    1405             :         }
    1406             : 
    1407       77472 :         read_idx = (read_idx + 1) % max_requests;
    1408             :     }
    1409             : 
    1410             :     /*
    1411             :      * Update ring buffer state: head remains the same, tail moves, count
    1412             :      * decreases
    1413             :      */
    1414         656 :     CheckpointerShmem->tail = write_idx;
    1415         656 :     CheckpointerShmem->num_requests -= num_skipped;
    1416             : 
    1417         656 :     ereport(DEBUG1,
    1418             :             (errmsg_internal("compacted fsync request queue from %d entries to %d entries",
    1419             :                              num_requests, CheckpointerShmem->num_requests)));
    1420             : 
    1421             :     /* Cleanup. */
    1422         656 :     pfree(skip_slot);
    1423         656 :     return true;
    1424             : }
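The pass above keeps only the latest occurrence of each duplicated request and preserves the relative order of the survivors. A standalone sketch of the same idea on a flat array of integers (the real code works on a ring buffer and uses a hash table instead of the quadratic scan here):

    /* Standalone sketch of "drop earlier duplicates, keep the latest" compaction
     * on a flat integer array, purely for illustration. */
    #include <stdbool.h>
    #include <stdio.h>

    int
    main(void)
    {
        int  reqs[] = {3, 5, 3, 7, 5, 3};
        int  n = sizeof(reqs) / sizeof(reqs[0]);
        bool skip[sizeof(reqs) / sizeof(reqs[0])] = {false};
        int  write_idx = 0;

        /* Mark every element that reappears later as skippable. */
        for (int i = 0; i < n; i++)
            for (int j = i + 1; j < n; j++)
                if (reqs[i] == reqs[j])
                {
                    skip[i] = true;
                    break;
                }

        /* Compact in place, preserving the order of the survivors. */
        for (int i = 0; i < n; i++)
            if (!skip[i])
                reqs[write_idx++] = reqs[i];

        for (int i = 0; i < write_idx; i++)
            printf("%d ", reqs[i]);     /* prints: 7 5 3 */
        printf("\n");
        return 0;
    }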
    1425             : 
    1426             : /*
    1427             :  * AbsorbSyncRequests
    1428             :  *      Retrieve queued sync requests and pass them to sync mechanism.
    1429             :  *
    1430             :  * This is exported because it must be called during CreateCheckPoint;
    1431             :  * we have to be sure we have accepted all pending requests just before
    1432             :  * we start fsync'ing.  Since CreateCheckPoint sometimes runs in
    1433             :  * non-checkpointer processes, do nothing if not checkpointer.
    1434             :  */
    1435             : void
    1436       41064 : AbsorbSyncRequests(void)
    1437             : {
    1438       41064 :     CheckpointerRequest *requests = NULL;
    1439             :     CheckpointerRequest *request;
    1440             :     int         n,
    1441             :                 i;
    1442             :     bool        loop;
    1443             : 
    1444       41064 :     if (!AmCheckpointerProcess())
    1445        1304 :         return;
    1446             : 
    1447             :     do
    1448             :     {
    1449       39760 :         LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
    1450             : 
    1451             :         /*---
    1452             :          * We try to avoid holding the lock for a long time by:
    1453             :          * 1. Copying the request array and processing the requests after
    1454             :          *    releasing the lock;
    1455             :          * 2. Processing not the whole queue, but only batches of
    1456             :          *    CKPT_REQ_BATCH_SIZE at once.
    1457             :          *
    1458             :          * Once we have cleared the requests from shared memory, we must
    1459             :          * PANIC if we then fail to absorb them (e.g., because our hashtable
    1460             :          * runs out of memory).  This is because the system cannot run safely
    1461             :          * if we are unable to fsync what we have been told to fsync.
    1462             :          * Fortunately, the hashtable is so small that the problem is quite
    1463             :          * unlikely to arise in practice.
    1464             :          *
     1465             :          * Note: The maximum possible size of the ring buffer is
     1466             :          * MAX_CHECKPOINT_REQUESTS entries, which fit within the maximum
     1467             :          * palloc allocation size of 1 GB.  Our maximum batch size,
    1468             :          * CKPT_REQ_BATCH_SIZE, is even smaller.
    1469             :          */
    1470       39760 :         n = Min(CheckpointerShmem->num_requests, CKPT_REQ_BATCH_SIZE);
    1471       39760 :         if (n > 0)
    1472             :         {
    1473       19422 :             if (!requests)
    1474       19422 :                 requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest));
    1475             : 
    1476     2337456 :             for (i = 0; i < n; i++)
    1477             :             {
    1478     2318034 :                 requests[i] = CheckpointerShmem->requests[CheckpointerShmem->head];
    1479     2318034 :                 CheckpointerShmem->head = (CheckpointerShmem->head + 1) % CheckpointerShmem->max_requests;
    1480             :             }
    1481             : 
    1482       19422 :             CheckpointerShmem->num_requests -= n;
    1483             : 
    1484             :         }
    1485             : 
    1486       39760 :         START_CRIT_SECTION();
    1487             : 
    1488             :         /* Are there any requests in the queue? If so, keep going. */
    1489       39760 :         loop = CheckpointerShmem->num_requests != 0;
    1490             : 
    1491       39760 :         LWLockRelease(CheckpointerCommLock);
    1492             : 
    1493     2357794 :         for (request = requests; n > 0; request++, n--)
    1494     2318034 :             RememberSyncRequest(&request->ftag, request->type);
    1495             : 
    1496       39760 :         END_CRIT_SECTION();
    1497       39760 :     } while (loop);
    1498             : 
    1499       39760 :     if (requests)
    1500       19422 :         pfree(requests);
    1501             : }
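The drain loop above copies at most one bounded batch out of shared memory per lock acquisition, processes the private copy after releasing the lock, and repeats while the queue is non-empty. A standalone sketch of that batching shape; the queue, batch size, and "lock" boundaries here are invented:

    /* Standalone sketch of draining a queue in bounded batches, mirroring the
     * shape of the loop above; batch size and queue contents are invented. */
    #include <stdio.h>

    #define BATCH_SIZE 4

    int
    main(void)
    {
        int queue[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
        int remaining = 10;
        int head = 0;

        while (remaining > 0)
        {
            int batch[BATCH_SIZE];
            int n = remaining < BATCH_SIZE ? remaining : BATCH_SIZE;

            /* "Under the lock": copy a batch out and advance the queue head. */
            for (int i = 0; i < n; i++)
                batch[i] = queue[head++];
            remaining -= n;

            /* "After releasing the lock": process the private copy. */
            for (int i = 0; i < n; i++)
                printf("absorb %d\n", batch[i]);
        }
        return 0;
    }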
    1502             : 
    1503             : /*
    1504             :  * Update any shared memory configurations based on config parameters
    1505             :  */
    1506             : static void
    1507        1276 : UpdateSharedMemoryConfig(void)
    1508             : {
    1509             :     /* update global shmem state for sync rep */
    1510        1276 :     SyncRepUpdateSyncStandbysDefined();
    1511             : 
    1512             :     /*
    1513             :      * If full_page_writes has been changed by SIGHUP, we update it in shared
    1514             :      * memory and write an XLOG_FPW_CHANGE record.
    1515             :      */
    1516        1276 :     UpdateFullPageWrites();
    1517             : 
    1518        1276 :     elog(DEBUG2, "checkpointer updated shared memory configuration values");
    1519        1276 : }
    1520             : 
    1521             : /*
    1522             :  * FirstCallSinceLastCheckpoint allows a process to take an action once
    1523             :  * per checkpoint cycle by asynchronously checking for checkpoint completion.
    1524             :  */
    1525             : bool
    1526       29572 : FirstCallSinceLastCheckpoint(void)
    1527             : {
    1528             :     static int  ckpt_done = 0;
    1529             :     int         new_done;
    1530       29572 :     bool        FirstCall = false;
    1531             : 
    1532       29572 :     SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
    1533       29572 :     new_done = CheckpointerShmem->ckpt_done;
    1534       29572 :     SpinLockRelease(&CheckpointerShmem->ckpt_lck);
    1535             : 
    1536       29572 :     if (new_done != ckpt_done)
    1537        1260 :         FirstCall = true;
    1538             : 
    1539       29572 :     ckpt_done = new_done;
    1540             : 
    1541       29572 :     return FirstCall;
    1542             : }
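The function above is an edge detector: it reports true the first time it is called after the shared ckpt_done counter has changed, by comparing that counter against a static copy kept from the previous call. A standalone sketch of the same pattern with a simulated counter:

    /* Standalone sketch of once-per-checkpoint-cycle edge detection; the
     * "shared" counter here is simulated rather than read from shared memory. */
    #include <stdbool.h>
    #include <stdio.h>

    static int simulated_ckpt_done = 0;     /* stands in for shared memory */

    static bool
    first_call_since_last_checkpoint(void)
    {
        static int seen_done = 0;
        bool       first = (simulated_ckpt_done != seen_done);

        seen_done = simulated_ckpt_done;
        return first;
    }

    int
    main(void)
    {
        for (int tick = 0; tick < 6; tick++)
        {
            if (tick == 3)
                simulated_ckpt_done++;      /* a checkpoint completes */

            if (first_call_since_last_checkpoint())
                printf("tick %d: do the once-per-cycle work\n", tick);
        }
        return 0;
    }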
    1543             : 
    1544             : /*
    1545             :  * Wake up the checkpointer process.
    1546             :  */
    1547             : void
    1548        1884 : WakeupCheckpointer(void)
    1549             : {
    1550        1884 :     volatile PROC_HDR *procglobal = ProcGlobal;
    1551        1884 :     ProcNumber  checkpointerProc = procglobal->checkpointerProc;
    1552             : 
    1553        1884 :     if (checkpointerProc != INVALID_PROC_NUMBER)
    1554        1256 :         SetLatch(&GetPGProcByNumber(checkpointerProc)->procLatch);
    1555        1884 : }

Generated by: LCOV version 1.16